Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/dock.c | 11
-rw-r--r--  drivers/acpi/ec.c | 36
-rw-r--r--  drivers/acpi/executer/exconfig.c | 3
-rw-r--r--  drivers/acpi/namespace/nsnames.c | 34
-rw-r--r--  drivers/acpi/pci_link.c | 12
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/processor_perflib.c | 2
-rw-r--r--  drivers/acpi/resources/rscalc.c | 3
-rw-r--r--  drivers/acpi/utilities/utalloc.c | 8
-rw-r--r--  drivers/acpi/utilities/utdelete.c | 13
-rw-r--r--  drivers/acpi/utilities/utobject.c | 13
-rw-r--r--  drivers/acpi/wmi.c | 2
-rw-r--r--  drivers/ata/ahci.c | 8
-rw-r--r--  drivers/ata/ata_piix.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 60
-rw-r--r--  drivers/ata/libata-eh.c | 30
-rw-r--r--  drivers/ata/pata_acpi.c | 2
-rw-r--r--  drivers/ata/pata_atiixp.c | 2
-rw-r--r--  drivers/ata/pata_cs5530.c | 6
-rw-r--r--  drivers/ata/pata_it821x.c | 2
-rw-r--r--  drivers/ata/pata_oldpiix.c | 2
-rw-r--r--  drivers/ata/pata_sc1200.c | 6
-rw-r--r--  drivers/ata/pata_via.c | 59
-rw-r--r--  drivers/ata/sata_mv.c | 37
-rw-r--r--  drivers/atm/adummy.c | 1
-rw-r--r--  drivers/base/class.c | 11
-rw-r--r--  drivers/base/core.c | 31
-rw-r--r--  drivers/base/driver.c | 3
-rw-r--r--  drivers/base/power/main.c | 19
-rw-r--r--  drivers/base/power/power.h | 9
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/nbd.c | 10
-rw-r--r--  drivers/block/pktcdvd.c | 35
-rw-r--r--  drivers/bluetooth/Kconfig | 10
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 282
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_usb.c | 2
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 7
-rw-r--r--  drivers/cdrom/gdrom.c | 7
-rw-r--r--  drivers/cdrom/viocd.c | 7
-rw-r--r--  drivers/char/agp/agp.h | 3
-rw-r--r--  drivers/char/agp/ali-agp.c | 10
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 10
-rw-r--r--  drivers/char/agp/amd64-agp.c | 51
-rw-r--r--  drivers/char/agp/ati-agp.c | 7
-rw-r--r--  drivers/char/agp/backend.c | 28
-rw-r--r--  drivers/char/agp/generic.c | 41
-rw-r--r--  drivers/char/agp/intel-agp.c | 83
-rw-r--r--  drivers/char/agp/isoch.c | 37
-rw-r--r--  drivers/char/agp/sis-agp.c | 17
-rw-r--r--  drivers/char/agp/sworks-agp.c | 25
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 32
-rw-r--r--  drivers/char/hvc_console.c | 5
-rw-r--r--  drivers/char/hw_random/via-rng.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 8
-rw-r--r--  drivers/char/pcmcia/ipwireless/tty.c | 1
-rw-r--r--  drivers/char/random.c | 1
-rw-r--r--  drivers/char/rtc.c | 1
-rw-r--r--  drivers/char/synclink_gt.c | 1
-rw-r--r--  drivers/char/tty_io.c | 77
-rw-r--r--  drivers/char/tty_ioctl.c | 6
-rw-r--r--  drivers/char/vt.c | 82
-rw-r--r--  drivers/char/vt_ioctl.c | 4
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.h | 1
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.h | 1
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 1
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 26
-rw-r--r--  drivers/cpuidle/governors/menu.c | 42
-rw-r--r--  drivers/cpuidle/sysfs.c | 29
-rw-r--r--  drivers/crypto/padlock-aes.c | 28
-rw-r--r--  drivers/crypto/padlock-sha.c | 9
-rw-r--r--  drivers/crypto/talitos.c | 54
-rw-r--r--  drivers/dma/mv_xor.c | 2
-rw-r--r--  drivers/edac/edac_core.h | 1
-rw-r--r--  drivers/firewire/Kconfig | 4
-rw-r--r--  drivers/firmware/memmap.c | 61
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 20
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 196
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 19
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 12
-rw-r--r--  drivers/hwmon/Kconfig | 16
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/abituguru3.c | 134
-rw-r--r--  drivers/hwmon/adcxx.c | 329
-rw-r--r--  drivers/hwmon/applesmc.c | 20
-rw-r--r--  drivers/hwmon/coretemp.c | 5
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 36
-rw-r--r--  drivers/hwmon/i5k_amb.c | 28
-rw-r--r--  drivers/hwmon/ibmaem.c | 27
-rw-r--r--  drivers/hwmon/w83791d.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 1
-rw-r--r--  drivers/i2c/chips/isp1301_omap.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 18
-rw-r--r--  drivers/ide/ide-cd.c | 20
-rw-r--r--  drivers/ide/pci/aec62xx.c | 2
-rw-r--r--  drivers/ide/pci/cy82c693.c | 2
-rw-r--r--  drivers/ide/pci/hpt366.c | 2
-rw-r--r--  drivers/ide/pci/it821x.c | 2
-rw-r--r--  drivers/ide/pci/pdc202xx_new.c | 2
-rw-r--r--  drivers/ide/pci/scc_pata.c | 2
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 4
-rw-r--r--  drivers/ide/pci/siimage.c | 2
-rw-r--r--  drivers/ide/pci/sis5513.c | 2
-rw-r--r--  drivers/ide/pci/tc86c001.c | 2
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 2
-rw-r--r--  drivers/ieee1394/nodemgr.c | 63
-rw-r--r--  drivers/ieee1394/nodemgr.h | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 25
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 9
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 48
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 60
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h | 1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 17
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 10
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 1
-rw-r--r--  drivers/input/evdev.c | 63
-rw-r--r--  drivers/input/joystick/xpad.c | 1
-rw-r--r--  drivers/input/keyboard/bf54x-keys.c | 3
-rw-r--r--  drivers/input/keyboard/gpio_keys.c | 4
-rw-r--r--  drivers/input/misc/cobalt_btns.c | 3
-rw-r--r--  drivers/input/mouse/Kconfig | 23
-rw-r--r--  drivers/input/mouse/Makefile | 1
-rw-r--r--  drivers/input/mouse/bcm5974.c | 681
-rw-r--r--  drivers/input/mouse/gpio_mouse.c | 1
-rw-r--r--  drivers/input/serio/i8042-sparcio.h | 3
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/serio/xilinx_ps2.c | 4
-rw-r--r--  drivers/input/tablet/gtco.c | 1
-rw-r--r--  drivers/input/touchscreen/Kconfig | 21
-rw-r--r--  drivers/input/touchscreen/mainstone-wm97xx.c | 1
-rw-r--r--  drivers/input/touchscreen/migor_ts.c | 11
-rw-r--r--  drivers/input/touchscreen/wm9705.c | 1
-rw-r--r--  drivers/input/touchscreen/wm9712.c | 1
-rw-r--r--  drivers/input/touchscreen/wm9713.c | 1
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 1
-rw-r--r--  drivers/lguest/lguest_device.c | 8
-rw-r--r--  drivers/lguest/page_tables.c | 25
-rw-r--r--  drivers/md/md.c | 33
-rw-r--r--  drivers/md/raid10.c | 9
-rw-r--r--  drivers/md/raid5.c | 32
-rw-r--r--  drivers/mfd/asic3.c | 1
-rw-r--r--  drivers/misc/acer-wmi.c | 24
-rw-r--r--  drivers/misc/eeepc-laptop.c | 2
-rw-r--r--  drivers/misc/eeprom_93cx6.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 17
-rw-r--r--  drivers/mmc/host/sdricoh_cs.c | 1
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/net/Kconfig | 8
-rw-r--r--  drivers/net/acenic.c | 1
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 6
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 2
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 3
-rw-r--r--  drivers/net/atlx/atl1.c | 1
-rw-r--r--  drivers/net/au1000_eth.c | 2
-rw-r--r--  drivers/net/ax88796.c | 4
-rw-r--r--  drivers/net/bnx2.c | 47
-rw-r--r--  drivers/net/bnx2x.h | 87
-rw-r--r--  drivers/net/bnx2x_fw_defs.h | 160
-rw-r--r--  drivers/net/bnx2x_hsi.h | 16
-rw-r--r--  drivers/net/bnx2x_init.h | 26
-rw-r--r--  drivers/net/bnx2x_init_values.h | 533
-rw-r--r--  drivers/net/bnx2x_link.c | 1259
-rw-r--r--  drivers/net/bnx2x_link.h | 11
-rw-r--r--  drivers/net/bnx2x_main.c | 1357
-rw-r--r--  drivers/net/bnx2x_reg.h | 210
-rw-r--r--  drivers/net/cpmac.c | 1
-rw-r--r--  drivers/net/e100.c | 4
-rw-r--r--  drivers/net/e1000/e1000_param.c | 81
-rw-r--r--  drivers/net/e1000e/defines.h | 2
-rw-r--r--  drivers/net/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/e1000e/netdev.c | 185
-rw-r--r--  drivers/net/e1000e/param.c | 25
-rw-r--r--  drivers/net/forcedeth.c | 4
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 8
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 8
-rw-r--r--  drivers/net/gianfar.c | 28
-rw-r--r--  drivers/net/gianfar.h | 1
-rw-r--r--  drivers/net/gianfar_sysfs.c | 1
-rw-r--r--  drivers/net/ibm_newemac/core.c | 6
-rw-r--r--  drivers/net/ibmveth.c | 5
-rw-r--r--  drivers/net/igb/e1000_82575.c | 1
-rw-r--r--  drivers/net/igb/e1000_hw.h | 1
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 17
-rw-r--r--  drivers/net/igb/igb_main.c | 25
-rw-r--r--  drivers/net/ipg.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 12
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/loopback.c | 67
-rw-r--r--  drivers/net/mv643xx_eth.c | 35
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 9
-rw-r--r--  drivers/net/ne.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 8
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 1
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 59
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 28
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 210
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 2
-rw-r--r--  drivers/net/ppp_mppe.c | 1
-rw-r--r--  drivers/net/pppol2tp.c | 1
-rw-r--r--  drivers/net/r6040.c | 1
-rw-r--r--  drivers/net/r8169.c | 2
-rw-r--r--  drivers/net/sh_eth.c | 1
-rw-r--r--  drivers/net/skfp/ess.c | 6
-rw-r--r--  drivers/net/sky2.c | 8
-rw-r--r--  drivers/net/smc91x.c | 2
-rw-r--r--  drivers/net/tehuti.h | 1
-rw-r--r--  drivers/net/tg3.c | 101
-rw-r--r--  drivers/net/tg3.h | 6
-rw-r--r--  drivers/net/tlan.c | 8
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 1
-rw-r--r--  drivers/net/tokenring/lanstreamer.h | 2
-rw-r--r--  drivers/net/tun.c | 105
-rw-r--r--  drivers/net/typhoon.c | 1
-rw-r--r--  drivers/net/usb/Kconfig | 21
-rw-r--r--  drivers/net/usb/hso.c | 56
-rw-r--r--  drivers/net/usb/mcs7830.c | 47
-rw-r--r--  drivers/net/wan/sbni.c | 8
-rw-r--r--  drivers/net/wd.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 32
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 1
-rw-r--r--  drivers/net/wireless/ath9k/hw.c | 6
-rw-r--r--  drivers/net/wireless/atmel.c | 51
-rw-r--r--  drivers/net/wireless/b43/main.c | 3
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 1
-rw-r--r--  drivers/net/wireless/ipw2100.c | 1
-rw-r--r--  drivers/net/wireless/ipw2200.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 7
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 51
-rw-r--r--  drivers/net/wireless/p54/p54common.h | 18
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.h | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 1
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 1
-rw-r--r--  drivers/of/device.c | 10
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 4
-rw-r--r--  drivers/oprofile/event_buffer.c | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 38
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 1
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 21
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 11
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 34
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 7
-rw-r--r--  drivers/pci/probe.c | 3
-rw-r--r--  drivers/pci/search.c | 2
-rw-r--r--  drivers/pci/setup-bus.c | 35
-rw-r--r--  drivers/pcmcia/pxa2xx_palmtx.c | 49
-rw-r--r--  drivers/rtc/Kconfig | 2
-rw-r--r--  drivers/rtc/rtc-bfin.c | 60
-rw-r--r--  drivers/rtc/rtc-dev.c | 17
-rw-r--r--  drivers/rtc/rtc-ds1374.c | 10
-rw-r--r--  drivers/rtc/rtc-isl1208.c | 2
-rw-r--r--  drivers/rtc/rtc-max6902.c | 2
-rw-r--r--  drivers/rtc/rtc-r9701.c | 1
-rw-r--r--  drivers/s390/block/dasd.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 2
-rw-r--r--  drivers/s390/block/dasd_eer.c | 3
-rw-r--r--  drivers/s390/block/dcssblk.c | 5
-rw-r--r--  drivers/s390/char/tape_char.c | 2
-rw-r--r--  drivers/s390/char/tape_std.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 20
-rw-r--r--  drivers/s390/cio/css.c | 1
-rw-r--r--  drivers/s390/cio/device.c | 40
-rw-r--r--  drivers/s390/cio/device.h | 2
-rw-r--r--  drivers/s390/cio/device_fsm.c | 31
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 6
-rw-r--r--  drivers/s390/cio/qdio_main.c | 74
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 6
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 6
-rw-r--r--  drivers/s390/net/claw.c | 79
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 56
-rw-r--r--  drivers/s390/net/ctcm_main.c | 24
-rw-r--r--  drivers/s390/net/ctcm_main.h | 9
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 47
-rw-r--r--  drivers/s390/net/lcs.c | 3
-rw-r--r--  drivers/s390/net/qeth_core.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 27
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 2
-rw-r--r--  drivers/sbus/sbus.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 5
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 37
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ips.c | 1
-rw-r--r--  drivers/scsi/ips.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 119
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 10
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/nsp32.h | 1
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/sg.c | 17
-rw-r--r--  drivers/serial/Kconfig | 1
-rw-r--r--  drivers/serial/bfin_5xx.c | 2
-rw-r--r--  drivers/serial/sunhv.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 2
-rw-r--r--  drivers/serial/sunsu.c | 2
-rw-r--r--  drivers/serial/sunzilog.c | 2
-rw-r--r--  drivers/spi/spi.c | 40
-rw-r--r--  drivers/ssb/main.c | 8
-rw-r--r--  drivers/uio/Kconfig | 13
-rw-r--r--  drivers/uio/Makefile | 1
-rw-r--r--  drivers/uio/uio_pdrv.c | 4
-rw-r--r--  drivers/uio/uio_pdrv_genirq.c | 188
-rw-r--r--  drivers/usb/Kconfig | 6
-rw-r--r--  drivers/usb/atm/cxacru.c | 2
-rw-r--r--  drivers/usb/atm/ueagle-atm.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 91
-rw-r--r--  drivers/usb/class/cdc-acm.h | 3
-rw-r--r--  drivers/usb/core/driver.c | 101
-rw-r--r--  drivers/usb/core/hcd.c | 9
-rw-r--r--  drivers/usb/core/hcd.h | 4
-rw-r--r--  drivers/usb/core/hub.c | 9
-rw-r--r--  drivers/usb/core/message.c | 2
-rw-r--r--  drivers/usb/core/urb.c | 9
-rw-r--r--  drivers/usb/core/usb.c | 73
-rw-r--r--  drivers/usb/core/usb.h | 3
-rw-r--r--  drivers/usb/gadget/Kconfig | 10
-rw-r--r--  drivers/usb/gadget/amd5536udc.c | 1
-rw-r--r--  drivers/usb/gadget/dummy_hcd.c | 5
-rw-r--r--  drivers/usb/gadget/f_acm.c | 196
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 2
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 2
-rw-r--r--  drivers/usb/gadget/f_serial.c | 2
-rw-r--r--  drivers/usb/gadget/f_subset.c | 2
-rw-r--r--  drivers/usb/gadget/gadget_chips.h | 6
-rw-r--r--  drivers/usb/gadget/omap_udc.c | 5
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 2
-rw-r--r--  drivers/usb/gadget/s3c2410_udc.c | 1
-rw-r--r--  drivers/usb/gadget/u_serial.c | 290
-rw-r--r--  drivers/usb/gadget/u_serial.h | 12
-rw-r--r--  drivers/usb/host/ehci-orion.c | 2
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 53
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 5
-rw-r--r--  drivers/usb/host/ohci-at91.c | 1
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 1
-rw-r--r--  drivers/usb/host/ohci-ep93xx.c | 1
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 23
-rw-r--r--  drivers/usb/host/ohci-hub.c | 64
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 1
-rw-r--r--  drivers/usb/host/ohci-omap.c | 4
-rw-r--r--  drivers/usb/host/ohci-pci.c | 133
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 1
-rw-r--r--  drivers/usb/host/ohci-pnx8550.c | 1
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 1
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 1
-rw-r--r--  drivers/usb/host/ohci-ps3.c | 1
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 1
-rw-r--r--  drivers/usb/host/ohci-q.c | 6
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c | 1
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 1
-rw-r--r--  drivers/usb/host/ohci-sh.c | 1
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 1
-rw-r--r--  drivers/usb/host/ohci-ssb.c | 1
-rw-r--r--  drivers/usb/host/ohci.h | 11
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 49
-rw-r--r--  drivers/usb/host/u132-hcd.c | 11
-rw-r--r--  drivers/usb/misc/Kconfig | 10
-rw-r--r--  drivers/usb/misc/Makefile | 1
-rw-r--r--  drivers/usb/misc/auerswald.c | 2152
-rw-r--r--  drivers/usb/misc/iowarrior.c | 1
-rw-r--r--  drivers/usb/misc/isight_firmware.c | 4
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/musb/Kconfig | 175
-rw-r--r--  drivers/usb/musb/Makefile | 69
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 1540
-rw-r--r--  drivers/usb/musb/cppi_dma.h | 133
-rw-r--r--  drivers/usb/musb/davinci.c | 462
-rw-r--r--  drivers/usb/musb/davinci.h | 100
-rw-r--r--  drivers/usb/musb/musb_core.c | 2253
-rw-r--r--  drivers/usb/musb/musb_core.h | 488
-rw-r--r--  drivers/usb/musb/musb_debug.h | 62
-rw-r--r--  drivers/usb/musb/musb_dma.h | 172
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 2031
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 108
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 983
-rw-r--r--  drivers/usb/musb/musb_host.c | 2170
-rw-r--r--  drivers/usb/musb/musb_host.h | 110
-rw-r--r--  drivers/usb/musb/musb_io.h | 115
-rw-r--r--  drivers/usb/musb/musb_regs.h | 300
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 425
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 433
-rw-r--r--  drivers/usb/musb/omap2430.c | 324
-rw-r--r--  drivers/usb/musb/omap2430.h | 56
-rw-r--r--  drivers/usb/musb/tusb6010.c | 1151
-rw-r--r--  drivers/usb/musb/tusb6010.h | 233
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 719
-rw-r--r--  drivers/usb/serial/Kconfig | 7
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 7
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 46
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/serial/sierra.c | 170
-rw-r--r--  drivers/usb/serial/usb-serial.c | 7
-rw-r--r--  drivers/usb/storage/Kconfig | 12
-rw-r--r--  drivers/usb/storage/Makefile | 1
-rw-r--r--  drivers/usb/storage/sierra_ms.c | 207
-rw-r--r--  drivers/usb/storage/sierra_ms.h | 4
-rw-r--r--  drivers/usb/storage/transport.c | 17
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 40
-rw-r--r--  drivers/usb/storage/usb.c | 3
-rw-r--r--  drivers/video/arkfb.c | 1
-rw-r--r--  drivers/video/atmel_lcdfb.c | 13
-rw-r--r--  drivers/video/aty/radeon_accel.c | 8
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 17
-rw-r--r--  drivers/video/console/fbcon.c | 4
-rw-r--r--  drivers/video/fb_defio.c | 19
-rw-r--r--  drivers/video/fbmem.c | 4
-rw-r--r--  drivers/video/fsl-diu-fb.c | 32
-rw-r--r--  drivers/video/matrox/i2c-matroxfb.c | 21
-rw-r--r--  drivers/video/matrox/matroxfb_maven.c | 97
-rw-r--r--  drivers/video/pm2fb.c | 1
-rw-r--r--  drivers/video/pxafb.c | 68
-rw-r--r--  drivers/video/s3fb.c | 1
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 2
-rw-r--r--  drivers/video/vermilion/vermilion.h | 1
-rw-r--r--  drivers/video/vt8623fb.c | 1
-rw-r--r--  drivers/video/xilinxfb.c | 1
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/watchdog/Kconfig | 29
-rw-r--r--  drivers/watchdog/Makefile | 3
-rw-r--r--  drivers/watchdog/at91rm9200_wdt.c | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 93
-rw-r--r--  drivers/watchdog/mpc8xx_wdt.c | 170
-rw-r--r--  drivers/watchdog/mpc8xxx_wdt.c | 11
-rw-r--r--  drivers/watchdog/pc87413_wdt.c | 1
-rw-r--r--  drivers/watchdog/rc32434_wdt.c | 344
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c | 285
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 18
-rw-r--r--  drivers/xen/manage.c | 2
476 files changed, 23708 insertions, 6409 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3..2735bde 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/gadget/
obj-$(CONFIG_SERIO) += input/serio/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bb7c51f..7d2edf1 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -563,9 +563,6 @@ EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
*/
static int handle_eject_request(struct dock_station *ds, u32 event)
{
- if (!dock_present(ds))
- return -ENODEV;
-
if (dock_in_progress(ds))
return -EBUSY;
@@ -573,8 +570,16 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
* here we need to generate the undock
* event prior to actually doing the undock
* so that the device struct still exists.
+ * Also, even send the dock event if the
+ * device is not present anymore
*/
dock_event(ds, event, UNDOCK_EVENT);
+
+ if (!dock_present(ds)) {
+ complete_undock(ds);
+ return -ENODEV;
+ }
+
hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
undock(ds);
eject_dock(ds);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5622aee..13593f9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -110,6 +110,31 @@ static struct acpi_ec {
u8 handlers_installed;
} *boot_ec, *first_ec;
+/*
+ * Some Asus system have exchanged ECDT data/command IO addresses.
+ */
+static int print_ecdt_error(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "ECDT has exchanged control/data I/O address\n",
+ id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
+ {
+ print_ecdt_error, "Asus L4R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+ DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
+ {
+ print_ecdt_error, "Asus M6R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+ DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+ {},
+};
+
/* --------------------------------------------------------------------------
Transaction Management
-------------------------------------------------------------------------- */
@@ -196,6 +221,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
return 0;
msleep(1);
}
+ if (acpi_ec_check_status(ec,event))
+ return 0;
}
pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
acpi_ec_read_status(ec),
@@ -911,6 +938,15 @@ int __init acpi_ec_ecdt_probe(void)
pr_info(PREFIX "EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
+ if (dmi_check_system(ec_dmi_table)) {
+ /*
+ * If the board falls into ec_dmi_table, it means
+ * that ECDT table gives the incorrect command/status
+ * & data I/O address. Just fix it.
+ */
+ boot_ec->data_addr = ecdt_ptr->control.address;
+ boot_ec->command_addr = ecdt_ptr->data.address;
+ }
boot_ec->gpe = ecdt_ptr->gpe;
boot_ec->handle = ACPI_ROOT_OBJECT;
acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 2a32c84..8892b98 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -479,5 +479,8 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
acpi_tb_set_table_loaded_flag(table_index, FALSE);
+ /* Table unloaded, remove a reference to the ddb_handle object */
+
+ acpi_ut_remove_reference(ddb_handle);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 549db42..bd57738 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -56,13 +56,14 @@ ACPI_MODULE_NAME("nsnames")
* Size - Size of the pathname
* *name_buffer - Where to return the pathname
*
- * RETURN: Places the pathname into the name_buffer, in external format
+ * RETURN: Status
+ * Places the pathname into the name_buffer, in external format
* (name segments separated by path separators)
*
* DESCRIPTION: Generate a full pathaname
*
******************************************************************************/
-void
+acpi_status
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer)
{
@@ -77,7 +78,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index < ACPI_NAME_SIZE) {
name_buffer[0] = AML_ROOT_PREFIX;
name_buffer[1] = 0;
- return;
+ return (AE_OK);
}
/* Store terminator byte, then build name backwards */
@@ -105,11 +106,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%X, size=%X, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
+
+ return (AE_BAD_PARAMETER);
}
- return;
+ return (AE_OK);
}
#ifdef ACPI_DEBUG_OUTPUT
@@ -129,6 +132,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
+ acpi_status status;
char *name_buffer;
acpi_size size;
@@ -138,8 +142,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
size = acpi_ns_get_pathname_length(node);
if (!size) {
- ACPI_ERROR((AE_INFO, "Invalid node failure"));
- return_PTR(NULL);
+ return (NULL);
}
/* Allocate a buffer to be returned to caller */
@@ -152,7 +155,11 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
/* Build the path in the allocated buffer */
- acpi_ns_build_external_path(node, size, name_buffer);
+ status = acpi_ns_build_external_path(node, size, name_buffer);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
return_PTR(name_buffer);
}
#endif
@@ -186,7 +193,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
while (next_node && (next_node != acpi_gbl_root_node)) {
if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
ACPI_ERROR((AE_INFO,
- "Invalid NS Node (%p) while traversing path",
+ "Invalid Namespace Node (%p) while traversing namespace",
next_node));
return 0;
}
@@ -234,8 +241,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
required_size = acpi_ns_get_pathname_length(node);
if (!required_size) {
- ACPI_ERROR((AE_INFO, "Invalid node failure"));
- return_ACPI_STATUS(AE_ERROR);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Validate/Allocate/Clear caller buffer */
@@ -247,7 +253,11 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
/* Build the path in the caller buffer */
- acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ status =
+ acpi_ns_build_external_path(node, required_size, buffer->pointer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
(char *)buffer->pointer, (u32) required_size));
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 89f3b2a..cf47805 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -849,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
if (irq < 0)
continue;
- if (irq >= ACPI_MAX_IRQS)
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
continue;
if (used)
@@ -872,10 +872,12 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (active)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
/*
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e36422a..d3f0a62 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,7 +123,7 @@ struct acpi_processor_errata errata __read_mostly;
static int set_no_mwait(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
- "disable mwait for CPU C-stetes\n", id->ident);
+ "disabling mwait for CPU C-states\n", id->ident);
idle_nomwait = 1;
return 0;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 283c08f..cf5b1b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,7 +41,6 @@
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
-#include <linux/cpuidle.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0133af4..80e3209 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
* 0 -> cpufreq low level drivers initialized -> consider _PPC values
* 1 -> ignore _PPC totally -> forced by user through boot param
*/
-static unsigned int ignore_ppc = -1;
+static int ignore_ppc = -1;
module_param(ignore_ppc, uint, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
"limited by BIOS, this should help");
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index f61ebc6..d9063ea 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -587,6 +587,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
} else {
temp_size_needed +=
acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ if (!temp_size_needed) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
}
} else {
/*
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index e7bf34a..7dcb67e 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -242,10 +242,12 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
{
acpi_status status = AE_OK;
- if (!required_length) {
- WARN_ON(1);
- return AE_ERROR;
+ /* Parameter validation */
+
+ if (!buffer || !required_length) {
+ return (AE_BAD_PARAMETER);
}
+
switch (buffer->length) {
case ACPI_NO_BUFFER:
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index c5c791a..42609d3 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -135,6 +135,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
obj_pointer = object->package.elements;
break;
+ /*
+ * These objects have a possible list of notify handlers.
+ * Device object also may have a GPE block.
+ */
case ACPI_TYPE_DEVICE:
if (object->device.gpe_block) {
@@ -142,9 +146,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
gpe_block);
}
- /* Walk the handler list for this device */
+ /*lint -fallthrough */
+
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Walk the notify handler list for this object */
- handler_desc = object->device.handler;
+ handler_desc = object->common_notify.handler;
while (handler_desc) {
next_desc = handler_desc->address_space.next;
acpi_ut_remove_reference(handler_desc);
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e254844..916eff3 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -425,6 +425,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
acpi_size * obj_length)
{
acpi_size length;
+ acpi_size size;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
@@ -484,10 +485,14 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
* Get the actual length of the full pathname to this object.
* The reference will be converted to the pathname to the object
*/
- length +=
- ACPI_ROUND_UP_TO_NATIVE_WORD
- (acpi_ns_get_pathname_length
- (internal_object->reference.node));
+ size =
+ acpi_ns_get_pathname_length(internal_object->
+ reference.node);
+ if (!size) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
break;
default:
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index c33b1c6..cfe2c83 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -347,7 +347,7 @@ struct acpi_buffer *out)
strcpy(method, "WQ");
strncat(method, block->object_id, 2);
- status = acpi_evaluate_object(handle, method, NULL, out);
+ status = acpi_evaluate_object(handle, method, &input, out);
/*
* If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ef3e552..c729e69 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -486,6 +486,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -575,9 +577,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
/* SiS */
- { PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
- { PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
- { PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
+ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
+ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
+ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index c294121..b1d08a8 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -275,6 +275,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (ICH10) */
{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
{ } /* terminate list */
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5ba96c5..79e3a8e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -104,6 +104,7 @@ struct ata_force_param {
unsigned long xfer_mask;
unsigned int horkage_on;
unsigned int horkage_off;
+ unsigned int lflags;
};
struct ata_force_ent {
@@ -196,22 +197,23 @@ void ata_force_cbl(struct ata_port *ap)
}
/**
- * ata_force_spd_limit - force SATA spd limit according to libata.force
+ * ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
- * Force SATA spd limit according to libata.force and whine about
- * it. When only the port part is specified (e.g. 1:), the limit
- * applies to all links connected to both the host link and all
- * fan-out ports connected via PMP. If the device part is
- * specified as 0 (e.g. 1.00:), it specifies the first fan-out
- * link not the host link. Device number 15 always points to the
- * host link whether PMP is attached or not.
+ * Force link flags and SATA spd limit according to libata.force
+ * and whine about it. When only the port part is specified
+ * (e.g. 1:), the limit applies to all links connected to both
+ * the host link and all fan-out ports connected via PMP. If the
+ * device part is specified as 0 (e.g. 1.00:), it specifies the
+ * first fan-out link not the host link. Device number 15 always
+ * points to the host link whether PMP is attached or not.
*
* LOCKING:
* EH context.
*/
-static void ata_force_spd_limit(struct ata_link *link)
+static void ata_force_link_limits(struct ata_link *link)
{
+ bool did_spd = false;
int linkno, i;
if (ata_is_host_link(link))
@@ -228,13 +230,22 @@ static void ata_force_spd_limit(struct ata_link *link)
if (fe->device != -1 && fe->device != linkno)
continue;
- if (!fe->param.spd_limit)
- continue;
+ /* only honor the first spd limit */
+ if (!did_spd && fe->param.spd_limit) {
+ link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: PHY spd limit set to %s\n",
+ fe->param.name);
+ did_spd = true;
+ }
- link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
- ata_link_printk(link, KERN_NOTICE,
- "FORCE: PHY spd limit set to %s\n", fe->param.name);
- return;
+ /* let lflags stack */
+ if (fe->param.lflags) {
+ link->flags |= fe->param.lflags;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: link flag 0x%x forced -> 0x%x\n",
+ fe->param.lflags, link->flags);
+ }
}
}
@@ -3277,7 +3288,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
dev->dma_mode = ata_xfer_mask2mode(dma_mask);
found = 1;
- if (dev->dma_mode != 0xff)
+ if (ata_dma_enabled(dev))
used_dma = 1;
}
if (!found)
@@ -3302,7 +3313,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
/* step 3: set host DMA timings */
ata_link_for_each_dev(dev, link) {
- if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
+ if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
continue;
dev->xfer_mode = dev->dma_mode;
@@ -5188,19 +5199,18 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
*/
int sata_link_init_spd(struct ata_link *link)
{
- u32 scontrol;
u8 spd;
int rc;
- rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+ rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
if (rc)
return rc;
- spd = (scontrol >> 4) & 0xf;
+ spd = (link->saved_scontrol >> 4) & 0xf;
if (spd)
link->hw_sata_spd_limit &= (1 << spd) - 1;
- ata_force_spd_limit(link);
+ ata_force_link_limits(link);
link->sata_spd_limit = link->hw_sata_spd_limit;
@@ -5783,9 +5793,10 @@ static void ata_port_detach(struct ata_port *ap)
ata_port_wait_eh(ap);
/* EH is now guaranteed to see UNLOADING - EH context belongs
- * to us. Disable all existing devices.
+ * to us. Restore SControl and disable all existing devices.
*/
- ata_port_for_each_link(link, ap) {
+ __ata_port_for_each_link(link, ap) {
+ sata_scr_write(link, SCR_CONTROL, link->saved_scontrol);
ata_link_for_each_dev(dev, link)
ata_dev_disable(dev);
}
@@ -5991,6 +6002,9 @@ static int __init ata_parse_force_one(char **cur,
{ "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
{ "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
{ "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
+ { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
+ { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
+ { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 58bdc53..c1db2f2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2040,7 +2040,7 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (ehc->i.serror)
- ata_port_printk(ap, KERN_ERR,
+ ata_link_printk(link, KERN_ERR,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
@@ -2171,18 +2171,12 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
}
static int ata_eh_followup_srst_needed(struct ata_link *link,
- int rc, int classify,
- const unsigned int *classes)
+ int rc, const unsigned int *classes)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
return 0;
- if (rc == -EAGAIN) {
- if (classify)
- return 1;
- rc = 0;
- }
- if (rc != 0)
- return 0;
+ if (rc == -EAGAIN)
+ return 1;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
return 1;
return 0;
@@ -2210,6 +2204,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
*/
while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
max_tries++;
+ if (link->flags & ATA_LFLAG_NO_HRST)
+ hardreset = NULL;
+ if (link->flags & ATA_LFLAG_NO_SRST)
+ softreset = NULL;
now = jiffies;
deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
@@ -2247,10 +2245,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
ehc->i.action &= ~ATA_EH_RESET;
if (hardreset) {
reset = hardreset;
- ehc->i.action = ATA_EH_HARDRESET;
+ ehc->i.action |= ATA_EH_HARDRESET;
} else if (softreset) {
reset = softreset;
- ehc->i.action = ATA_EH_SOFTRESET;
+ ehc->i.action |= ATA_EH_SOFTRESET;
}
if (prereset) {
@@ -2305,9 +2303,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
rc = ata_do_reset(link, reset, classes, deadline);
+ if (rc && rc != -EAGAIN)
+ goto fail;
if (reset == hardreset &&
- ata_eh_followup_srst_needed(link, rc, classify, classes)) {
+ ata_eh_followup_srst_needed(link, rc, classes)) {
/* okay, let's do follow-up softreset */
reset = softreset;
@@ -2322,10 +2322,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
rc = ata_do_reset(link, reset, classes, deadline);
}
-
- /* -EAGAIN can happen if we skipped followup SRST */
- if (rc && rc != -EAGAIN)
- goto fail;
} else {
if (verbose)
ata_link_printk(link, KERN_INFO, "no reset method "
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index fbe6057..eb919c1 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -181,7 +181,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
if (adev != acpi->last) {
pacpi_set_piomode(ap, adev);
- if (adev->dma_mode)
+ if (ata_dma_enabled(adev))
pacpi_set_dmamode(ap, adev);
acpi->last = adev;
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index d7de7ba..e8a0d99 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -183,7 +183,7 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
- if (adev->dma_mode >= XFER_UDMA_0)
+ if (ata_using_udma(adev))
tmp16 |= (1 << dn);
else
tmp16 &= ~(1 << dn);
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 744beeb..0c4b271 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -149,10 +149,10 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
- if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
- if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
- (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+ if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+ (ata_using_udma(prev) && !ata_using_udma(adev)))
/* Switch the mode bits */
cs5530_set_dmamode(ap, adev);
}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 27843c7..0221c9a 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -606,7 +606,7 @@ static void it821x_display_disk(int n, u8 *buf)
{
unsigned char id[41];
int mode = 0;
- char *mtype;
+ char *mtype = "";
char mbuf[8];
char *cbl = "(40 wire cable)";
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index e678af3..df64f24 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -198,7 +198,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
if (adev != ap->private_data) {
oldpiix_set_piomode(ap, adev);
- if (adev->dma_mode)
+ if (ata_dma_enabled(adev))
oldpiix_set_dmamode(ap, adev);
}
return ata_sff_qc_issue(qc);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index cbab397..0278fd2 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -167,10 +167,10 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
- if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
- if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
- (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+ if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+ (ata_using_udma(prev) && !ata_using_udma(adev)))
/* Switch the mode bits */
sc1200_set_dmamode(ap, adev);
}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 57d951b..8fdb2ce 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -324,62 +324,26 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
}
/**
- * via_ata_sff_tf_load - send taskfile registers to host controller
+ * via_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller.
*
* Note: This is to fix the internal bug of via chipsets, which
- * will reset the device register after changing the IEN bit on
- * ctl register
+ * will reset the device register after changing the IEN bit on
+ * ctl register
*/
-static void via_ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
- if (tf->ctl != ap->last_ctl) {
- iowrite8(tf->ctl, ioaddr->ctl_addr);
- iowrite8(tf->device, ioaddr->device_addr);
- ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
- }
-
- if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
- iowrite8(tf->hob_feature, ioaddr->feature_addr);
- iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
- iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
- iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
- iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
- }
+ struct ata_taskfile tmp_tf;
- if (is_addr) {
- iowrite8(tf->feature, ioaddr->feature_addr);
- iowrite8(tf->nsect, ioaddr->nsect_addr);
- iowrite8(tf->lbal, ioaddr->lbal_addr);
- iowrite8(tf->lbam, ioaddr->lbam_addr);
- iowrite8(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
+ if (ap->ctl != ap->last_ctl && !(tf->flags & ATA_TFLAG_DEVICE)) {
+ tmp_tf = *tf;
+ tmp_tf.flags |= ATA_TFLAG_DEVICE;
+ tf = &tmp_tf;
}
-
- if (tf->flags & ATA_TFLAG_DEVICE) {
- iowrite8(tf->device, ioaddr->device_addr);
- VPRINTK("device 0x%X\n", tf->device);
- }
-
- ata_wait_idle(ap);
+ ata_sff_tf_load(ap, tf);
}
static struct scsi_host_template via_sht = {
@@ -392,13 +356,12 @@ static struct ata_port_operations via_port_ops = {
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
.prereset = via_pre_reset,
- .sff_tf_load = via_ata_tf_load,
+ .sff_tf_load = via_tf_load,
};
static struct ata_port_operations via_port_ops_noirq = {
.inherits = &via_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
- .sff_tf_load = via_ata_tf_load,
};
/**
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ad169ff..13c1d2a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1134,30 +1134,16 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
if (ap->nr_active_links == 0)
return 0;
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- /*
- * The port is operating in host queuing mode (EDMA).
- * It can accomodate a new qc if the qc protocol
- * is compatible with the current host queue mode.
- */
- if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
- /*
- * The host queue (EDMA) is in NCQ mode.
- * If the new qc is also an NCQ command,
- * then allow the new qc.
- */
- if (qc->tf.protocol == ATA_PROT_NCQ)
- return 0;
- } else {
- /*
- * The host queue (EDMA) is in non-NCQ, DMA mode.
- * If the new qc is also a non-NCQ, DMA command,
- * then allow the new qc.
- */
- if (qc->tf.protocol == ATA_PROT_DMA)
- return 0;
- }
- }
+ /*
+ * The port is operating in host queuing mode (EDMA) with NCQ
+ * enabled, allow multiple NCQ commands. EDMA also allows
+ * queueing multiple DMA commands but libata core currently
+ * doesn't allow it.
+ */
+ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
+ (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
+ return 0;
+
return ATA_DEFER_PORT;
}
@@ -3036,7 +3022,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
case chip_soc:
hpriv->ops = &mv_soc_ops;
- hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
+ hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
+ MV_HP_ERRATA_60X1C0;
break;
default:
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 2ebd07f..5effec6 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -3,7 +3,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 5667c2f..cc5e28c 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -295,6 +295,12 @@ int class_for_each_device(struct class *class, struct device *start,
if (!class)
return -EINVAL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return -EINVAL;
+ }
+
mutex_lock(&class->p->class_mutex);
list_for_each_entry(dev, &class->p->class_devices, node) {
if (start) {
@@ -344,6 +350,11 @@ struct device *class_find_device(struct class *class, struct device *start,
if (!class)
return NULL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return NULL;
+ }
mutex_lock(&class->p->class_mutex);
list_for_each_entry(dev, &class->p->class_devices, node) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 068aa1c..d021c98 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -53,7 +53,7 @@ static inline int device_is_not_partition(struct device *dev)
* it is attached to. If it is not attached to a bus either, an empty
* string will be returned.
*/
-const char *dev_driver_string(struct device *dev)
+const char *dev_driver_string(const struct device *dev)
{
return dev->driver ? dev->driver->name :
(dev->bus ? dev->bus->name :
@@ -541,6 +541,7 @@ void device_initialize(struct device *dev)
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_init_wakeup(dev, 0);
+ device_pm_init(dev);
set_dev_node(dev, -1);
}
@@ -843,13 +844,19 @@ int device_add(struct device *dev)
{
struct device *parent = NULL;
struct class_interface *class_intf;
- int error;
+ int error = -EINVAL;
dev = get_device(dev);
- if (!dev || !strlen(dev->bus_id)) {
- error = -EINVAL;
- goto Done;
- }
+ if (!dev)
+ goto done;
+
+ /* Temporarily support init_name if it is set.
+ * It will override bus_id for now */
+ if (dev->init_name)
+ dev_set_name(dev, "%s", dev->init_name);
+
+ if (!strlen(dev->bus_id))
+ goto done;
pr_debug("device: '%s': %s\n", dev->bus_id, __func__);
@@ -897,9 +904,10 @@ int device_add(struct device *dev)
error = bus_add_device(dev);
if (error)
goto BusError;
- error = device_pm_add(dev);
+ error = dpm_sysfs_add(dev);
if (error)
- goto PMError;
+ goto DPMError;
+ device_pm_add(dev);
kobject_uevent(&dev->kobj, KOBJ_ADD);
bus_attach_device(dev);
if (parent)
@@ -917,10 +925,10 @@ int device_add(struct device *dev)
class_intf->add_dev(dev, class_intf);
mutex_unlock(&dev->class->p->class_mutex);
}
- Done:
+done:
put_device(dev);
return error;
- PMError:
+ DPMError:
bus_remove_device(dev);
BusError:
if (dev->bus)
@@ -944,7 +952,7 @@ int device_add(struct device *dev)
cleanup_device_parent(dev);
if (parent)
put_device(parent);
- goto Done;
+ goto done;
}
/**
@@ -1007,6 +1015,7 @@ void device_del(struct device *dev)
struct class_interface *class_intf;
device_pm_remove(dev);
+ dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->knode_parent);
if (MAJOR(dev->devt)) {
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 2ef5acf..1e2bda7 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -16,9 +16,6 @@
#include <linux/string.h>
#include "base.h"
-#define to_dev(node) container_of(node, struct device, driver_list)
-
-
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3250c52..273a944 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -67,20 +67,16 @@ void device_pm_unlock(void)
* device_pm_add - add a device to the list of active devices
* @dev: Device to be added to the list
*/
-int device_pm_add(struct device *dev)
+void device_pm_add(struct device *dev)
{
- int error;
-
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
mutex_lock(&dpm_list_mtx);
if (dev->parent) {
- if (dev->parent->power.status >= DPM_SUSPENDING) {
- dev_warn(dev, "parent %s is sleeping, will not add\n",
+ if (dev->parent->power.status >= DPM_SUSPENDING)
+ dev_warn(dev, "parent %s should not be sleeping\n",
dev->parent->bus_id);
- WARN_ON(true);
- }
} else if (transition_started) {
/*
* We refuse to register parentless devices while a PM
@@ -89,13 +85,9 @@ int device_pm_add(struct device *dev)
*/
WARN_ON(true);
}
- error = dpm_sysfs_add(dev);
- if (!error) {
- dev->power.status = DPM_ON;
- list_add_tail(&dev->power.entry, &dpm_list);
- }
+
+ list_add_tail(&dev->power.entry, &dpm_list);
mutex_unlock(&dpm_list_mtx);
- return error;
}
/**
@@ -110,7 +102,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
mutex_lock(&dpm_list_mtx);
- dpm_sysfs_remove(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a3252c0..41f51fa 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,8 @@
+static inline void device_pm_init(struct device *dev)
+{
+ dev->power.status = DPM_ON;
+}
+
#ifdef CONFIG_PM_SLEEP
/*
@@ -11,12 +16,12 @@ static inline struct device *to_device(struct list_head *entry)
return container_of(entry, struct device, power.entry);
}
-extern int device_pm_add(struct device *);
+extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
#else /* CONFIG_PM_SLEEP */
-static inline int device_pm_add(struct device *dev) { return 0; }
+static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_remove(struct device *dev) {}
#endif
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 24b97b0..d070d49 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -571,8 +571,8 @@ out_free:
list_del(&brd->brd_list);
brd_free(brd);
}
+ unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
- unregister_blkdev(RAMDISK_MAJOR, "brd");
return -ENOMEM;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ad98dda..1778e4a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -707,15 +707,15 @@ static int __init nbd_init(void)
BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
- return -ENOMEM;
-
if (max_part < 0) {
printk(KERN_CRIT "nbd: max_part must be >= 0\n");
return -EINVAL;
}
+ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+ if (!nbd_dev)
+ return -ENOMEM;
+
part_shift = 0;
if (max_part > 0)
part_shift = fls(max_part);
@@ -779,6 +779,7 @@ out:
blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
+ kfree(nbd_dev);
return err;
}
@@ -795,6 +796,7 @@ static void __exit nbd_cleanup(void)
}
}
unregister_blkdev(NBD_MAJOR, "nbd");
+ kfree(nbd_dev);
printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 158eed4..29b7a64 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -49,7 +49,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
@@ -2798,14 +2797,9 @@ out_mem:
return ret;
}
-static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct pktcdvd_device *pd;
- long ret;
-
- lock_kernel();
- pd = inode->i_bdev->bd_disk->private_data;
+ struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
@@ -2818,8 +2812,7 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
- ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
- break;
+ return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
case CDROMEJECT:
/*
@@ -2828,15 +2821,14 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
- break;
+ return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
default:
VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
- ret = -ENOTTY;
+ return -ENOTTY;
}
- unlock_kernel();
- return ret;
+
+ return 0;
}
static int pkt_media_changed(struct gendisk *disk)
@@ -2858,7 +2850,7 @@ static struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
- .unlocked_ioctl = pkt_ioctl,
+ .ioctl = pkt_ioctl,
.media_changed = pkt_media_changed,
};
@@ -3023,8 +3015,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
mutex_unlock(&ctl_mutex);
}
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct pkt_ctrl_command ctrl_cmd;
@@ -3041,22 +3032,16 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
case PKT_CTRL_CMD_SETUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- lock_kernel();
ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
- unlock_kernel();
break;
case PKT_CTRL_CMD_TEARDOWN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- lock_kernel();
ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
- unlock_kernel();
break;
case PKT_CTRL_CMD_STATUS:
- lock_kernel();
pkt_get_status(&ctrl_cmd);
- unlock_kernel();
break;
default:
return -ENOTTY;
@@ -3069,7 +3054,7 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations pkt_ctl_fops = {
- .unlocked_ioctl = pkt_ctl_ioctl,
+ .ioctl = pkt_ctl_ioctl,
.owner = THIS_MODULE,
};
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index a235ca7..7cb4029 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -3,8 +3,8 @@ menu "Bluetooth device drivers"
depends on BT
config BT_HCIUSB
- tristate "HCI USB driver"
- depends on USB
+ tristate "HCI USB driver (old version)"
+ depends on USB && BT_HCIBTUSB=n
help
Bluetooth HCI USB driver.
This driver is required if you want to use Bluetooth devices with
@@ -23,15 +23,13 @@ config BT_HCIUSB_SCO
Say Y here to compile support for SCO over HCI USB.
config BT_HCIBTUSB
- tristate "HCI USB driver (alternate version)"
- depends on USB && EXPERIMENTAL && BT_HCIUSB=n
+ tristate "HCI USB driver"
+ depends on USB
help
Bluetooth HCI USB driver.
This driver is required if you want to use Bluetooth devices with
USB interface.
- This driver is still experimental and has no SCO support.
-
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (btusb).
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 593b7c5..2705847 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -60,7 +60,7 @@
/* ======================== Module parameters ======================== */
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>, Jose Orlando Pereira <jop@di.uminho.pt>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("BT3CPCC.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 95ae9ba..6a01068 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2,7 +2,7 @@
*
* Generic Bluetooth USB driver
*
- * Copyright (C) 2005-2007 Marcel Holtmann <marcel@holtmann.org>
+ * Copyright (C) 2005-2008 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -41,7 +41,7 @@
#define BT_DBG(D...)
#endif
-#define VERSION "0.2"
+#define VERSION "0.3"
static int ignore_dga;
static int ignore_csr;
@@ -160,12 +160,16 @@ static struct usb_device_id blacklist_table[] = {
{ } /* Terminating entry */
};
+#define BTUSB_MAX_ISOC_FRAMES 10
+
#define BTUSB_INTR_RUNNING 0
#define BTUSB_BULK_RUNNING 1
+#define BTUSB_ISOC_RUNNING 2
struct btusb_data {
struct hci_dev *hdev;
struct usb_device *udev;
+ struct usb_interface *isoc;
spinlock_t lock;
@@ -176,10 +180,15 @@ struct btusb_data {
struct usb_anchor tx_anchor;
struct usb_anchor intr_anchor;
struct usb_anchor bulk_anchor;
+ struct usb_anchor isoc_anchor;
struct usb_endpoint_descriptor *intr_ep;
struct usb_endpoint_descriptor *bulk_tx_ep;
struct usb_endpoint_descriptor *bulk_rx_ep;
+ struct usb_endpoint_descriptor *isoc_tx_ep;
+ struct usb_endpoint_descriptor *isoc_rx_ep;
+
+ int isoc_altsetting;
};
static void btusb_intr_complete(struct urb *urb)
@@ -195,6 +204,8 @@ static void btusb_intr_complete(struct urb *urb)
return;
if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
urb->transfer_buffer,
urb->actual_length) < 0) {
@@ -216,7 +227,7 @@ static void btusb_intr_complete(struct urb *urb)
}
}
-static inline int btusb_submit_intr_urb(struct hci_dev *hdev)
+static int btusb_submit_intr_urb(struct hci_dev *hdev)
{
struct btusb_data *data = hdev->driver_data;
struct urb *urb;
@@ -226,6 +237,9 @@ static inline int btusb_submit_intr_urb(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!data->intr_ep)
+ return -ENODEV;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
@@ -274,6 +288,8 @@ static void btusb_bulk_complete(struct urb *urb)
return;
if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
urb->transfer_buffer,
urb->actual_length) < 0) {
@@ -295,7 +311,7 @@ static void btusb_bulk_complete(struct urb *urb)
}
}
-static inline int btusb_submit_bulk_urb(struct hci_dev *hdev)
+static int btusb_submit_bulk_urb(struct hci_dev *hdev)
{
struct btusb_data *data = hdev->driver_data;
struct urb *urb;
@@ -305,6 +321,9 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!data->bulk_rx_ep)
+ return -ENODEV;
+
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
@@ -339,6 +358,127 @@ static inline int btusb_submit_bulk_urb(struct hci_dev *hdev)
return err;
}
+static void btusb_isoc_complete(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btusb_data *data = hdev->driver_data;
+ int i, err;
+
+ BT_DBG("%s urb %p status %d count %d", hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
+ if (urb->status == 0) {
+ for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned int offset = urb->iso_frame_desc[i].offset;
+ unsigned int length = urb->iso_frame_desc[i].actual_length;
+
+ if (urb->iso_frame_desc[i].status)
+ continue;
+
+ hdev->stat.byte_rx += length;
+
+ if (hci_recv_fragment(hdev, HCI_SCODATA_PKT,
+ urb->transfer_buffer + offset,
+ length) < 0) {
+ BT_ERR("%s corrupted SCO packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
+ }
+ }
+
+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+ return;
+
+ usb_anchor_urb(urb, &data->isoc_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ BT_ERR("%s urb %p failed to resubmit (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+{
+ int i, offset = 0;
+
+ BT_DBG("len %d mtu %d", len, mtu);
+
+ for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu;
+ i++, offset += mtu, len -= mtu) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = mtu;
+ }
+
+ if (len && i < BTUSB_MAX_ISOC_FRAMES) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = len;
+ i++;
+ }
+
+ urb->number_of_packets = i;
+}
+
+static int btusb_submit_isoc_urb(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hdev->driver_data;
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!data->isoc_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) *
+ BTUSB_MAX_ISOC_FRAMES;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
+
+ urb->dev = data->udev;
+ urb->pipe = pipe;
+ urb->context = hdev;
+ urb->complete = btusb_isoc_complete;
+ urb->interval = data->isoc_rx_ep->bInterval;
+
+ urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = size;
+
+ __fill_isoc_descriptor(urb, size,
+ le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
+
+ usb_anchor_urb(urb, &data->isoc_anchor);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err < 0) {
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ kfree(buf);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
static void btusb_tx_complete(struct urb *urb)
{
struct sk_buff *skb = urb->context;
@@ -392,6 +532,9 @@ static int btusb_close(struct hci_dev *hdev)
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->intr_anchor);
+
clear_bit(BTUSB_BULK_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->bulk_anchor);
@@ -453,6 +596,9 @@ static int btusb_send_frame(struct sk_buff *skb)
break;
case HCI_ACLDATA_PKT:
+ if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1)
+ return -ENODEV;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
@@ -467,9 +613,31 @@ static int btusb_send_frame(struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
+ if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndisocpipe(data->udev,
+ data->isoc_tx_ep->bEndpointAddress);
+
+ urb->dev = data->udev;
+ urb->pipe = pipe;
+ urb->context = skb;
+ urb->complete = btusb_tx_complete;
+ urb->interval = data->isoc_tx_ep->bInterval;
+
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = skb->data;
+ urb->transfer_buffer_length = skb->len;
+
+ __fill_isoc_descriptor(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
+
hdev->stat.sco_tx++;
- kfree_skb(skb);
- return 0;
+ break;
default:
return -EILSEQ;
@@ -508,22 +676,86 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
schedule_work(&data->work);
}
+static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+{
+ struct btusb_data *data = hdev->driver_data;
+ struct usb_interface *intf = data->isoc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, err;
+
+ if (!data->isoc)
+ return -ENODEV;
+
+ err = usb_set_interface(data->udev, 1, altsetting);
+ if (err < 0) {
+ BT_ERR("%s setting interface failed (%d)", hdev->name, -err);
+ return err;
+ }
+
+ data->isoc_altsetting = altsetting;
+
+ data->isoc_tx_ep = NULL;
+ data->isoc_rx_ep = NULL;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) {
+ data->isoc_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) {
+ data->isoc_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!data->isoc_tx_ep || !data->isoc_rx_ep) {
+ BT_ERR("%s invalid SCO descriptors", hdev->name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
- if (hdev->conn_hash.acl_num == 0) {
+ if (hdev->conn_hash.acl_num > 0) {
+ if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) {
+ if (btusb_submit_bulk_urb(hdev) < 0)
+ clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+ else
+ btusb_submit_bulk_urb(hdev);
+ }
+ } else {
clear_bit(BTUSB_BULK_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->bulk_anchor);
- return;
}
- if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) {
- if (btusb_submit_bulk_urb(hdev) < 0)
- clear_bit(BTUSB_BULK_RUNNING, &data->flags);
- else
- btusb_submit_bulk_urb(hdev);
+ if (hdev->conn_hash.sco_num > 0) {
+ if (data->isoc_altsetting != 2) {
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ if (__set_isoc_interface(hdev, 2) < 0)
+ return;
+ }
+
+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btusb_submit_isoc_urb(hdev) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btusb_submit_isoc_urb(hdev);
+ }
+ } else {
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ __set_isoc_interface(hdev, 0);
}
}
@@ -597,6 +829,7 @@ static int btusb_probe(struct usb_interface *intf,
init_usb_anchor(&data->tx_anchor);
init_usb_anchor(&data->intr_anchor);
init_usb_anchor(&data->bulk_anchor);
+ init_usb_anchor(&data->isoc_anchor);
hdev = hci_alloc_dev();
if (!hdev) {
@@ -620,6 +853,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->owner = THIS_MODULE;
+ /* interface numbers are hardcoded in the spec */
+ data->isoc = usb_ifnum_to_if(data->udev, 1);
+
if (reset || id->driver_info & BTUSB_RESET)
set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
@@ -628,11 +864,16 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
}
+ if (id->driver_info & BTUSB_BROKEN_ISOC)
+ data->isoc = NULL;
+
if (id->driver_info & BTUSB_SNIFFER) {
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = data->udev;
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+
+ data->isoc = NULL;
}
if (id->driver_info & BTUSB_BCM92035) {
@@ -646,6 +887,16 @@ static int btusb_probe(struct usb_interface *intf,
}
}
+ if (data->isoc) {
+ err = usb_driver_claim_interface(&btusb_driver,
+ data->isoc, NULL);
+ if (err < 0) {
+ hci_free_dev(hdev);
+ kfree(data);
+ return err;
+ }
+ }
+
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
@@ -670,6 +921,9 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
+ if (data->isoc)
+ usb_driver_release_interface(&btusb_driver, data->isoc);
+
usb_set_intfdata(intf, NULL);
hci_unregister_dev(hdev);
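The SCO support added above hinges on __fill_isoc_descriptor(), which splits the URB transfer buffer into at most BTUSB_MAX_ISOC_FRAMES descriptors of one MTU each, plus a final short frame for any remainder. A small standalone sketch of that partitioning, using a simplified frame structure in place of struct urb (frame_desc and fill_frames are illustrative names, not kernel API):
#include <stdio.h>
#define MAX_ISOC_FRAMES 10  /* mirrors BTUSB_MAX_ISOC_FRAMES */
struct frame_desc {
	unsigned int offset;
	unsigned int length;
};
/* Split 'len' bytes into up to MAX_ISOC_FRAMES frames of at most 'mtu'
 * bytes each; returns the number of frames actually used. */
static int fill_frames(struct frame_desc *desc, int len, int mtu)
{
	int i, offset = 0;
	for (i = 0; i < MAX_ISOC_FRAMES && len >= mtu;
	     i++, offset += mtu, len -= mtu) {
		desc[i].offset = offset;
		desc[i].length = mtu;
	}
	/* A final short frame carries any remainder. */
	if (len && i < MAX_ISOC_FRAMES) {
		desc[i].offset = offset;
		desc[i].length = len;
		i++;
	}
	return i;
}
int main(void)
{
	struct frame_desc desc[MAX_ISOC_FRAMES];
	int n = fill_frames(desc, 200, 64);   /* e.g. a 64-byte SCO MTU */
	for (int i = 0; i < n; i++)
		printf("frame %d: offset %u length %u\n",
		       i, desc[i].offset, desc[i].length);
	return 0;
}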
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 69df187..8dfcf77 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -577,7 +577,7 @@ module_exit(hci_uart_exit);
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index e397572..3c45392 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -1130,7 +1130,7 @@ module_param(isoc, int, 0644);
MODULE_PARM_DESC(isoc, "Set isochronous transfers for SCO over HCI support");
#endif
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth HCI USB driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index d97700a..7320a71 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -377,7 +377,7 @@ module_exit(vhci_exit);
module_param(minor, int, 0444);
MODULE_PARM_DESC(minor, "Miscellaneous minor device number");
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d9d1b65..74031de 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -408,7 +408,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
ENSURE(get_last_session, CDC_MULTI_SESSION);
ENSURE(get_mcn, CDC_MCN);
ENSURE(reset, CDC_RESET);
- ENSURE(audio_ioctl, CDC_PLAY_AUDIO);
ENSURE(generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdo->n_minors = 0;
@@ -2506,8 +2505,6 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
- if (!CDROM_CAN(CDC_PLAY_AUDIO))
- return -ENOSYS;
if (copy_from_user(&q, argp, sizeof(q)))
return -EFAULT;
@@ -2538,8 +2535,6 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
- if (!CDROM_CAN(CDC_PLAY_AUDIO))
- return -ENOSYS;
if (copy_from_user(&header, argp, sizeof(header)))
return -EFAULT;
@@ -2562,8 +2557,6 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
- if (!CDROM_CAN(CDC_PLAY_AUDIO))
- return -ENOSYS;
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1e0455b..1231d95 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -471,6 +471,12 @@ cleanup_sense_final:
return err;
}
+static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+ void *arg)
+{
+ return -EINVAL;
+}
+
static struct cdrom_device_ops gdrom_ops = {
.open = gdrom_open,
.release = gdrom_release,
@@ -478,6 +484,7 @@ static struct cdrom_device_ops gdrom_ops = {
.media_changed = gdrom_mediachanged,
.get_last_session = gdrom_get_last_session,
.reset = gdrom_hardreset,
+ .audio_ioctl = gdrom_audio_ioctl,
.capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
.n_minors = 1,
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9d0dfe6..031e0e1 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -550,12 +550,19 @@ return_complete:
}
}
+static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+ void *arg)
+{
+ return -EINVAL;
+}
+
static struct cdrom_device_ops viocd_dops = {
.open = viocd_open,
.release = viocd_release,
.media_changed = viocd_media_changed,
.lock_door = viocd_lock_door,
.generic_packet = viocd_packet,
+ .audio_ioctl = viocd_audio_ioctl,
.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
};
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 81e14be..4bada0e 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -148,6 +148,9 @@ struct agp_bridge_data {
char minor_version;
struct list_head list;
u32 apbase_config;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
+ spinlock_t mapped_lock;
};
#define KB(x) ((x) * 1024)
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 1ffb381..31dcd91 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -110,7 +110,8 @@ static int ali_configure(void)
nlvm_addr+= agp_bridge->gart_bus_addr;
nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
- printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
+ dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+ nlvm_addr);
}
#endif
@@ -315,8 +316,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev,
goto found;
}
- printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n",
- pdev->device);
+ dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
@@ -361,8 +362,7 @@ found:
bridge->driver = &ali_generic_bridge;
}
- printk(KERN_INFO PFX "Detected ALi %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 39a0718..e280531 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -419,8 +419,8 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
return -ENODEV;
j = ent - agp_amdk7_pci_table;
- printk(KERN_INFO PFX "Detected AMD %s chipset\n",
- amd_agp_device_ids[j].chipset_name);
+ dev_info(&pdev->dev, "AMD %s chipset\n",
+ amd_agp_device_ids[j].chipset_name);
bridge = agp_alloc_bridge();
if (!bridge)
@@ -442,7 +442,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
- printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ dev_info(&pdev->dev, "no AGP VGA controller\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
@@ -453,7 +453,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
(if necessary at all). */
if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
agp_bridge->flags |= AGP_ERRATA_1X;
- printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
+ dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
}
pci_dev_put(gfxcard);
}
@@ -469,7 +469,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
- printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
+ dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
}
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 481ffe8..7495c52 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -34,6 +34,7 @@
static struct resource *aperture_resource;
static int __initdata agp_try_unsupported = 1;
+static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
{
@@ -293,12 +294,13 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
* so let double check that order, and lets trust the AMD NB settings
*/
if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
- printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n",
- 32 << order);
+ dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+ 32 << order);
order = nb_order;
}
- printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
+ dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+ aper, 32 << order);
if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
return -1;
@@ -319,10 +321,10 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
for (i = 0; i < num_k8_northbridges; i++) {
struct pci_dev *dev = k8_northbridges[i];
if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
- printk(KERN_ERR PFX "No usable aperture found.\n");
+ dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
/* should port this to i386 */
- printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n");
+ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
return -1;
}
@@ -345,14 +347,14 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
default: revstring="??"; break;
}
- printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring);
+ dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
/*
* Work around errata.
* Chips before B2 stepping incorrectly reporting v3.5
*/
if (pdev->revision < 0x13) {
- printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n");
+ dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
bridge->major_version = 3;
bridge->minor_version = 0;
}
@@ -375,11 +377,11 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
struct pci_dev *dev1;
int i;
unsigned size = amd64_fetch_size();
- printk(KERN_INFO "Setting up ULi AGP.\n");
+
+ dev_info(&pdev->dev, "setting up ULi AGP\n");
dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0));
if (dev1 == NULL) {
- printk(KERN_INFO PFX "Detected a ULi chipset, "
- "but could not fine the secondary device.\n");
+ dev_info(&pdev->dev, "can't find ULi secondary device\n");
return -ENODEV;
}
@@ -388,7 +390,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
break;
if (i == ARRAY_SIZE(uli_sizes)) {
- printk(KERN_INFO PFX "No ULi size found for %d\n", size);
+ dev_info(&pdev->dev, "no ULi size found for %d\n", size);
return -ENODEV;
}
@@ -433,13 +435,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
int i;
unsigned size = amd64_fetch_size();
- printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n");
+ dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
if (dev1 == NULL) {
- printk(KERN_INFO PFX "agpgart: Detected an NVIDIA "
- "nForce3 chipset, but could not find "
- "the secondary device.\n");
+ dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
return -ENODEV;
}
@@ -448,7 +448,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
break;
if (i == ARRAY_SIZE(nforce3_sizes)) {
- printk(KERN_INFO PFX "No NForce3 size found for %d\n", size);
+ dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
return -ENODEV;
}
@@ -462,7 +462,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
- printk(KERN_INFO PFX "aperture base > 4G\n");
+ dev_info(&pdev->dev, "aperture base > 4G\n");
return -ENODEV;
}
@@ -489,6 +489,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
+ int err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
@@ -504,7 +505,8 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
amd8151_init(pdev, bridge);
} else {
- printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn);
+ dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
}
bridge->driver = &amd_8151_driver;
@@ -536,7 +538,12 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
+ err = agp_add_bridge(bridge);
+ if (err < 0)
+ return err;
+
+ agp_bridges_found++;
+ return 0;
}
static void __devexit agp_amd64_remove(struct pci_dev *pdev)
@@ -713,7 +720,11 @@ int __init agp_amd64_init(void)
if (agp_off)
return -EINVAL;
- if (pci_register_driver(&agp_amd64_pci_driver) < 0) {
+ err = pci_register_driver(&agp_amd64_pci_driver);
+ if (err < 0)
+ return err;
+
+ if (agp_bridges_found == 0) {
struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3a4566c..6ecbcaf 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -486,8 +486,8 @@ static int __devinit agp_ati_probe(struct pci_dev *pdev,
goto found;
}
- printk(KERN_ERR PFX
- "Unsupported Ati chipset (device id: %04x)\n", pdev->device);
+ dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
found:
@@ -500,8 +500,7 @@ found:
bridge->driver = &ati_generic_bridge;
- printk(KERN_INFO PFX "Detected Ati %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 1ec8710..3a3cc03 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -144,7 +144,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
void *addr = bridge->driver->agp_alloc_page(bridge);
if (!addr) {
- printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
+ dev_err(&bridge->dev->dev,
+ "can't get memory for scratch page\n");
return -ENOMEM;
}
@@ -155,13 +156,13 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
size_value = bridge->driver->fetch_size();
if (size_value == 0) {
- printk(KERN_ERR PFX "unable to determine aperture size.\n");
+ dev_err(&bridge->dev->dev, "can't determine aperture size\n");
rc = -EINVAL;
goto err_out;
}
if (bridge->driver->create_gatt_table(bridge)) {
- printk(KERN_ERR PFX
- "unable to get memory for graphics translation table.\n");
+ dev_err(&bridge->dev->dev,
+ "can't get memory for graphics translation table\n");
rc = -ENOMEM;
goto err_out;
}
@@ -169,7 +170,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
bridge->key_list = vmalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
- printk(KERN_ERR PFX "error allocating memory for key lists.\n");
+ dev_err(&bridge->dev->dev,
+ "can't allocate memory for key lists\n");
rc = -ENOMEM;
goto err_out;
}
@@ -179,10 +181,12 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
memset(bridge->key_list, 0, PAGE_SIZE * 4);
if (bridge->driver->configure()) {
- printk(KERN_ERR PFX "error configuring host chipset.\n");
+ dev_err(&bridge->dev->dev, "error configuring host chipset\n");
rc = -EINVAL;
goto err_out;
}
+ INIT_LIST_HEAD(&bridge->mapped_list);
+ spin_lock_init(&bridge->mapped_lock);
return 0;
@@ -269,25 +273,27 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
- printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
+ dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
return -EINVAL;
}
error = agp_backend_initialize(bridge);
if (error) {
- printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
+ dev_info(&bridge->dev->dev,
+ "agp_backend_initialize() failed\n");
goto err_out;
}
if (list_empty(&agp_bridges)) {
error = agp_frontend_initialize();
if (error) {
- printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
+ dev_info(&bridge->dev->dev,
+ "agp_frontend_initialize() failed\n");
goto frontend_err;
}
- printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
- bridge->driver->fetch_size(), bridge->gart_bus_addr);
+ dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index eaa1a35..118dbde 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -429,6 +429,10 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
curr->is_bound = true;
curr->pg_start = pg_start;
+ spin_lock(&agp_bridge->mapped_lock);
+ list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+ spin_unlock(&agp_bridge->mapped_lock);
+
return 0;
}
EXPORT_SYMBOL(agp_bind_memory);
@@ -461,10 +465,34 @@ int agp_unbind_memory(struct agp_memory *curr)
curr->is_bound = false;
curr->pg_start = 0;
+ spin_lock(&curr->bridge->mapped_lock);
+ list_del(&curr->mapped_list);
+ spin_unlock(&curr->bridge->mapped_lock);
return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
+/**
+ * agp_rebind_memory - Rewrite the entire GATT, useful on resume
+ */
+int agp_rebind_memory(void)
+{
+ struct agp_memory *curr;
+ int ret_val = 0;
+
+ spin_lock(&agp_bridge->mapped_lock);
+ list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
+ ret_val = curr->bridge->driver->insert_memory(curr,
+ curr->pg_start,
+ curr->type);
+ if (ret_val != 0)
+ break;
+ }
+ spin_unlock(&agp_bridge->mapped_lock);
+ return ret_val;
+}
+EXPORT_SYMBOL(agp_rebind_memory);
+
/* End - Routines for handling swapping of agp_memory into the GATT */
@@ -771,8 +799,8 @@ void agp_device_command(u32 bridge_agpstat, bool agp_v3)
if (!agp)
continue;
- printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
- agp_v3 ? 3 : 2, pci_name(device), mode);
+ dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+ agp_v3 ? 3 : 2, mode);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
}
}
@@ -800,10 +828,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
get_agp_version(agp_bridge);
- printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
- agp_bridge->major_version,
- agp_bridge->minor_version,
- pci_name(agp_bridge->dev));
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
pci_read_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
@@ -832,8 +858,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
pci_write_config_dword(bridge->dev,
bridge->capndx+AGPCTRL, temp);
- printk(KERN_INFO PFX "Device is in legacy mode,"
- " falling back to 2.x\n");
+ dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
}
}
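With the hunks above, agp_bind_memory() records each bound region on the bridge's mapped_list so that agp_rebind_memory() can rewrite the whole GATT after resume, re-inserting every entry and stopping at the first failure. A userspace sketch of that walk over a simple linked list (region, rebind_all and insert_ok are illustrative names, not the AGP API):
#include <stdio.h>
struct region {
	int pg_start;
	int type;
	struct region *next;     /* stands in for the mapped_list linkage */
};
/* Re-insert every bound region; stop and report the first failure,
 * mirroring how agp_rebind_memory() walks mapped_list. */
static int rebind_all(struct region *head,
		      int (*insert_cb)(struct region *, int, int))
{
	for (struct region *r = head; r; r = r->next) {
		int ret = insert_cb(r, r->pg_start, r->type);
		if (ret != 0)
			return ret;
	}
	return 0;
}
static int insert_ok(struct region *r, int pg_start, int type)
{
	printf("rebinding region at pg_start %d (type %d)\n", pg_start, type);
	return 0;
}
int main(void)
{
	struct region b = { 16, 0, NULL };
	struct region a = { 0, 0, &b };
	return rebind_all(&a, insert_ok);
}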
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index df70264..016fdf0 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -32,8 +32,8 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
-#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
-#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
@@ -55,7 +55,7 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -161,7 +161,7 @@ static int intel_i810_fetch_size(void)
values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- printk(KERN_WARNING PFX "i810 is disabled\n");
+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
return 0;
}
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -193,7 +193,8 @@ static int intel_i810_configure(void)
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
- printk(KERN_ERR PFX "Unable to remap memory.\n");
+ dev_err(&intel_private.pcidev->dev,
+ "can't remap memory\n");
return -ENOMEM;
}
}
@@ -201,7 +202,8 @@ static int intel_i810_configure(void)
if ((readl(intel_private.registers+I810_DRAM_CTL)
& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
/* This will need to be dynamically assigned */
- printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
intel_private.num_dcache_entries = 1024;
}
pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
@@ -500,8 +502,8 @@ static void intel_i830_init_gtt_entries(void)
size = 1024 + 512;
break;
default:
- printk(KERN_INFO PFX "Unknown page table size, "
- "assuming 512KB\n");
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
size = 512;
}
size += 4; /* add in BIOS popup space */
@@ -515,8 +517,8 @@ static void intel_i830_init_gtt_entries(void)
size = 2048;
break;
default:
- printk(KERN_INFO PFX "Unknown page table size 0x%x, "
- "assuming 512KB\n",
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
(gmch_ctrl & G33_PGETBL_SIZE_MASK));
size = 512;
}
@@ -627,11 +629,11 @@ static void intel_i830_init_gtt_entries(void)
}
}
if (gtt_entries > 0)
- printk(KERN_INFO PFX "Detected %dK %s memory.\n",
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
gtt_entries / KB(1), local ? "local" : "stolen");
else
- printk(KERN_INFO PFX
- "No pre-allocated video memory detected.\n");
+ dev_info(&agp_bridge->dev->dev,
+ "no pre-allocated video memory detected\n");
gtt_entries /= KB(4);
intel_private.gtt_entries = gtt_entries;
@@ -801,10 +803,12 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -851,7 +855,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -957,7 +962,7 @@ static void intel_i9xx_setup_flush(void)
if (intel_private.ifp_resource.start) {
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
- printk(KERN_INFO "unable to ioremap flush page - no chipset flushing");
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
}
}
@@ -1028,10 +1033,12 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -1078,7 +1085,8 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -1182,7 +1190,7 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_IGD_HB:
+ case PCI_DEVICE_ID_INTEL_GM45_HB:
case PCI_DEVICE_ID_INTEL_IGD_E_HB:
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
@@ -1379,7 +1387,7 @@ static int intel_815_configure(void)
/* the Intel 815 chipset spec. says that bits 29-31 in the
* ATTBASE register are reserved -> try not to write them */
if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
- printk(KERN_EMERG PFX "gatt bus addr too high");
+ dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
return -EINVAL;
}
@@ -2117,8 +2125,8 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
- "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
"Intel Integrated Graphics Device", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
@@ -2163,8 +2171,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (intel_agp_chipsets[i].name == NULL) {
if (cap_ptr)
- printk(KERN_WARNING PFX "Unsupported Intel chipset"
- "(device id: %04x)\n", pdev->device);
+ dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2172,9 +2180,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (bridge->driver == NULL) {
/* bridge has no AGP and no IGD detected */
if (cap_ptr)
- printk(KERN_WARNING PFX "Failed to find bridge device "
- "(chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
+ dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
+ intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2183,8 +2190,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
bridge->dev_private_data = &intel_private;
- printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n",
- intel_agp_chipsets[i].name);
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
* The following fixes the case where the BIOS has "forgotten" to
@@ -2194,7 +2200,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
- printk(KERN_ERR PFX "could not assign resource 0\n");
+ dev_err(&pdev->dev, "can't assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2206,7 +2212,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
* 20030610 - hamish@zot.org
*/
if (pci_enable_device(pdev)) {
- printk(KERN_ERR PFX "Unable to Enable PCI device\n");
+ dev_err(&pdev->dev, "can't enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -2238,6 +2244,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+ int ret_val;
pci_restore_state(pdev);
@@ -2265,6 +2272,10 @@ static int agp_intel_resume(struct pci_dev *pdev)
else if (bridge->driver == &intel_i965_driver)
intel_i915_configure();
+ ret_val = agp_rebind_memory();
+ if (ret_val != 0)
+ return ret_val;
+
return 0;
}
#endif
@@ -2315,7 +2326,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
- ID(PCI_DEVICE_ID_INTEL_IGD_HB),
+ ID(PCI_DEVICE_ID_INTEL_GM45_HB),
ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index 3f9ccde..c73385c 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -153,7 +153,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Check if this configuration has any chance of working */
if (tot_bw > target.maxbw) {
- printk(KERN_ERR PFX "isochronous bandwidth required "
+ dev_err(&td->dev, "isochronous bandwidth required "
"by AGP 3.0 devices exceeds that which is supported by "
"the AGP 3.0 bridge!\n");
ret = -ENODEV;
@@ -188,7 +188,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Exit if the minimal ISOCH_N allocation among the masters is more
* than the target can handle. */
if (tot_n > target.n) {
- printk(KERN_ERR PFX "number of isochronous "
+ dev_err(&td->dev, "number of isochronous "
"transactions per period required by AGP 3.0 devices "
"exceeds that which is supported by the AGP 3.0 "
"bridge!\n");
@@ -229,7 +229,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
/* Exit if the minimal RQ needs of the masters exceeds what the target
* can provide. */
if (tot_rq > rq_isoch) {
- printk(KERN_ERR PFX "number of request queue slots "
+ dev_err(&td->dev, "number of request queue slots "
"required by the isochronous bandwidth requested by "
"AGP 3.0 devices exceeds the number provided by the "
"AGP 3.0 bridge!\n");
@@ -359,8 +359,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
case 0x0001: /* Unclassified device */
/* Don't know what this is, but log it for investigation. */
if (mcapndx != 0) {
- printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n",
- dev->vendor, dev->device);
+ dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+ pci_name(dev),
+ dev->vendor, dev->device);
}
continue;
@@ -407,17 +408,18 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
}
if (mcapndx == 0) {
- printk(KERN_ERR PFX "woah! Non-AGP device "
- "found on the secondary bus of an AGP 3.5 bridge!\n");
+ dev_err(&td->dev, "woah! Non-AGP device %s on "
+ "secondary bus of AGP 3.5 bridge!\n",
+ pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
if (mmajor < 3) {
- printk(KERN_ERR PFX "woah! AGP 2.0 device "
- "found on the secondary bus of an AGP 3.5 "
- "bridge operating with AGP 3.0 electricals!\n");
+ dev_err(&td->dev, "woah! AGP 2.0 device %s on "
+ "secondary bus of AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
@@ -427,10 +429,10 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
if (((mstatus >> 3) & 0x1) == 0) {
- printk(KERN_ERR PFX "woah! AGP 3.x device "
- "not operating in AGP 3.x mode found on the "
- "secondary bus of an AGP 3.5 bridge operating "
- "with AGP 3.0 electricals!\n");
+ dev_err(&td->dev, "woah! AGP 3.x device %s not "
+ "operating in AGP 3.x mode on secondary bus "
+ "of AGP 3.5 bridge operating with AGP 3.0 "
+ "electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
@@ -444,9 +446,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
if (isoch) {
ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
if (ret) {
- printk(KERN_INFO PFX "Something bad happened setting "
- "up isochronous xfers. Falling back to "
- "non-isochronous xfer mode.\n");
+ dev_info(&td->dev, "something bad happened setting "
+ "up isochronous xfers; falling back to "
+ "non-isochronous xfer mode\n");
} else {
goto free_and_exit;
}
@@ -466,4 +468,3 @@ free_and_exit:
get_out:
return ret;
}
-
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index b679184..2587ef9 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -79,10 +79,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
u32 command;
int rate;
- printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
- agp_bridge->major_version,
- agp_bridge->minor_version,
- pci_name(agp_bridge->dev));
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(bridge, mode, command);
@@ -94,8 +92,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
if (!agp)
continue;
- printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
- pci_name(device), rate);
+ dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+ pci_name(device), rate);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
@@ -105,7 +103,7 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
* cannot be configured
*/
if (device->device == bridge->dev->device) {
- printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
+ dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
msleep(10);
}
}
@@ -190,7 +188,8 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev,
return -ENODEV;
- printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device);
+ dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
@@ -242,7 +241,7 @@ static struct pci_device_id agp_sis_pci_table[] = {
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
- .device = PCI_DEVICE_ID_SI_5591_AGP,
+ .device = PCI_DEVICE_ID_SI_5591,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 0e054c1..2fb27fe 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -241,7 +241,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB post flush took more than 3 seconds\n");
break;
}
}
@@ -251,7 +252,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB Dir flush took more than 3 seconds\n");
break;
}
}
@@ -271,7 +273,7 @@ static int serverworks_configure(void)
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
if (!serverworks_private.registers) {
- printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
+ dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
return -ENOMEM;
}
@@ -451,7 +453,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
switch (pdev->device) {
case 0x0006:
- printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
+ dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
@@ -461,8 +463,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
default:
if (cap_ptr)
- printk(KERN_ERR PFX "Unsupported Serverworks chipset "
- "(device id: %04x)\n", pdev->device);
+ dev_err(&pdev->dev, "unsupported Serverworks chipset "
+ "[%04x/%04x]\n", pdev->vendor, pdev->device);
return -ENODEV;
}
@@ -470,8 +472,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
if (!bridge_dev) {
- printk(KERN_INFO PFX "Detected a Serverworks chipset "
- "but could not find the secondary device.\n");
+ dev_info(&pdev->dev, "can't find secondary device\n");
return -ENODEV;
}
@@ -482,8 +483,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
if (temp2 != 0) {
- printk(KERN_INFO PFX "Detected 64 bit aperture address, "
- "but top bits are not zero. Disabling agp\n");
+ dev_info(&pdev->dev, "64 bit aperture address, "
+ "but top bits are not zero; disabling AGP\n");
return -ENODEV;
}
serverworks_private.mm_addr_ofs = 0x18;
@@ -495,8 +496,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
pci_read_config_dword(pdev,
serverworks_private.mm_addr_ofs + 4, &temp2);
if (temp2 != 0) {
- printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
- "but top bits are not zero. Disabling agp\n");
+ dev_info(&pdev->dev, "64 bit MMIO address, but top "
+ "bits are not zero; disabling AGP\n");
return -ENODEV;
}
}
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index d2fa3cf..eef7270 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -46,8 +46,8 @@ static int uninorth_fetch_size(void)
break;
if (i == agp_bridge->driver->num_aperture_sizes) {
- printk(KERN_ERR PFX "Invalid aperture size, using"
- " default\n");
+ dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+ "using default\n");
size = 0;
aperture = NULL;
}
@@ -108,8 +108,8 @@ static int uninorth_configure(void)
current_size = A_SIZE_32(agp_bridge->current_size);
- printk(KERN_INFO PFX "configuring for size idx: %d\n",
- current_size->size_value);
+ dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
+ current_size->size_value);
/* aperture size and gatt addr */
pci_write_config_dword(agp_bridge->dev,
@@ -197,8 +197,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
if (gp[i]) {
- printk("u3_insert_memory: entry 0x%x occupied (%x)\n",
- i, gp[i]);
+ dev_info(&agp_bridge->dev->dev,
+ "u3_insert_memory: entry 0x%x occupied (%x)\n",
+ i, gp[i]);
return -EBUSY;
}
}
@@ -276,8 +277,8 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
&scratch);
} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
- printk(KERN_ERR PFX "failed to write UniNorth AGP"
- " command register\n");
+ dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
+ "command register\n");
if (uninorth_rev >= 0x30) {
/* This is an AGP V3 */
@@ -330,8 +331,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
if (!(cmd & PCI_AGP_COMMAND_AGP))
continue;
- printk("uninorth-agp: disabling AGP on device %s\n",
- pci_name(device));
+ dev_info(&pdev->dev, "disabling AGP on device %s\n",
+ pci_name(device));
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
}
@@ -341,8 +342,7 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
bridge->dev_private_data = (void *)(long)cmd;
if (cmd & PCI_AGP_COMMAND_AGP) {
- printk("uninorth-agp: disabling AGP on bridge %s\n",
- pci_name(pdev));
+ dev_info(&pdev->dev, "disabling AGP on bridge\n");
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
}
@@ -591,14 +591,14 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name != NULL; ++j) {
if (pdev->device == devs[j].device_id) {
- printk(KERN_INFO PFX "Detected Apple %s chipset\n",
- devs[j].chipset_name);
+ dev_info(&pdev->dev, "Apple %s chipset\n",
+ devs[j].chipset_name);
goto found;
}
}
- printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n",
- pdev->device);
+ dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
return -ENODEV;
found:
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 02aac10..fd64137 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -322,11 +322,10 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
hp->tty = tty;
- if (hp->ops->notifier_add)
- rc = hp->ops->notifier_add(hp, hp->data);
-
spin_unlock_irqrestore(&hp->lock, flags);
+ if (hp->ops->notifier_add)
+ rc = hp->ops->notifier_add(hp, hp->data);
/*
* If the notifier fails we return an error. The tty layer
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4..128202e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
+#include <asm/i387.h>
#define PFX KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@ enum {
* Another possible performance boost may come from simply buffering
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
*/
static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
+ int ts_state;
+
+ ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
:"=m"(*addr), "=a"(eax_out)
:"D"(addr), "d"(edx_in));
+ irq_ts_restore(ts_state);
return eax_out;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f52931e..8e8afb6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2695,15 +2695,13 @@ static __devinit void default_find_bmc(void)
for (i = 0; ; i++) {
if (!ipmi_defaults[i].port)
break;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
#ifdef CONFIG_PPC_MERGE
if (check_legacy_ioport(ipmi_defaults[i].port))
continue;
#endif
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
info->addr_source = NULL;
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index b141450..3a23e76 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -29,7 +29,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/uaccess.h>
-#include <linux/version.h>
#include "tty.h"
#include "network.h"
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e0d0e37..1838aa3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1571,6 +1571,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
return half_md4_transform(hash, keyptr->secret);
}
+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index d9799e2..f53d4d0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,7 +78,6 @@
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <asm/current.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 509c89a..08911ed 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0e6866f..daeb8f7 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2496,45 +2496,26 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tiocswinsz - implement window size set ioctl
- * @tty; tty
- * @arg: user buffer for result
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @real_tty: real tty (not the same as tty if using a pty/tty pair)
+ * @ws: winsize giving the requested rows and columns (in characters)
*
- * Copies the user idea of the window size to the kernel. Traditionally
- * this is just advisory information but for the Linux console it
- * actually has driver level meaning and triggers a VC resize.
- *
- * Locking:
- * Called function use the console_sem is used to ensure we do
- * not try and resize the console twice at once.
- * The tty->termios_mutex is used to ensure we don't double
- * resize and get confused. Lock order - tty->termios_mutex before
- * console sem
+ * Update the termios variables and send the necessary signals to
+ * perform a terminal resize correctly
*/
-static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
- struct winsize __user *arg)
+int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws)
{
- struct winsize tmp_ws;
struct pid *pgrp, *rpgrp;
unsigned long flags;
- if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
- return -EFAULT;
-
- mutex_lock(&tty->termios_mutex);
- if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg)))
+ /* For a PTY we need to lock the tty side */
+ mutex_lock(&real_tty->termios_mutex);
+ if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
-
-#ifdef CONFIG_VT
- if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
- if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col,
- tmp_ws.ws_row)) {
- mutex_unlock(&tty->termios_mutex);
- return -ENXIO;
- }
- }
-#endif
/* Get the PID values and reference them so we can
avoid holding the tty ctrl lock while sending signals */
spin_lock_irqsave(&tty->ctrl_lock, flags);
@@ -2550,14 +2531,42 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
put_pid(pgrp);
put_pid(rpgrp);
- tty->winsize = tmp_ws;
- real_tty->winsize = tmp_ws;
+ tty->winsize = *ws;
+ real_tty->winsize = *ws;
done:
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&real_tty->termios_mutex);
return 0;
}
/**
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the user idea of the window size to the kernel. Traditionally
+ * this is just advisory information but for the Linux console it
+ * actually has driver level meaning and triggers a VC resize.
+ *
+ * Locking:
+ *	Driver dependent. The default do_resize method takes the
+ * tty termios mutex and ctrl_lock. The console takes its own lock
+ * then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize __user *arg)
+{
+ struct winsize tmp_ws;
+ if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+ return -EFAULT;
+
+ if (tty->ops->resize)
+ return tty->ops->resize(tty, real_tty, &tmp_ws);
+ else
+ return tty_do_resize(tty, real_tty, &tmp_ws);
+}
+
+/**
* tioccons - allow admin to move logical console
* @file: the file to become console
*
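
tiocswinsz() above now dispatches through an optional tty_operations resize hook and falls back to the new tty_do_resize() helper. A sketch of what a driver-side hook could look like, assuming tty_do_resize() is visible to drivers through linux/tty.h as the vt.c usage later in this patch suggests; the driver itself is hypothetical and not part of this patch:

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* hypothetical hook; the signature mirrors the one tiocswinsz() calls above */
static int hypothetical_resize(struct tty_struct *tty,
			       struct tty_struct *real_tty,
			       struct winsize *ws)
{
	/* reprogram the (hypothetical) hardware for ws->ws_col x ws->ws_row */

	/* then let the common helper update winsize and signal the pgrp */
	return tty_do_resize(tty, real_tty, ws);
}

static const struct tty_operations hypothetical_ops = {
	/* .open, .write, ... elided */
	.resize	= hypothetical_resize,
};
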
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index ea9fc5d..bf34e45 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -937,12 +937,14 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
return 0;
#endif
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0,
+ /* FIXME: for correctness we may need to take the termios
+ lock here - review */
+ return put_user(C_CLOCAL(real_tty) ? 1 : 0,
(int __user *)arg);
case TIOCSSOFTCAR:
if (get_user(arg, (unsigned int __user *) arg))
return -EFAULT;
- return tty_change_softcar(tty, arg);
+ return tty_change_softcar(real_tty, arg);
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1bc00c9..60359c3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -803,7 +803,25 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
*/
#define VC_RESIZE_MAXCOL (32767)
#define VC_RESIZE_MAXROW (32767)
-int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
+
+/**
+ * vc_do_resize - resizing method for the tty
+ * @tty: tty being resized
+ * @real_tty: real tty (different to tty if a pty/tty pair)
+ * @vc: virtual console private data
+ * @cols: columns
+ * @lines: lines
+ *
+ * Resize a virtual console, clipping according to the actual constraints.
+ * If the caller passes a tty structure then update the termios winsize
+ * information and perform any necessary signal handling.
+ *
+ * Caller must hold the console semaphore. Takes the termios mutex and
+ * ctrl_lock of the tty IFF a tty is passed.
+ */
+
+static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct vc_data *vc, unsigned int cols, unsigned int lines)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned int old_cols, old_rows, old_row_size, old_screen_size;
@@ -907,24 +925,15 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
gotoxy(vc, vc->vc_x, vc->vc_y);
save_cur(vc);
- if (vc->vc_tty) {
- struct winsize ws, *cws = &vc->vc_tty->winsize;
- struct pid *pgrp = NULL;
-
+ if (tty) {
+ /* Rewrite the requested winsize data with the actual
+ resulting sizes */
+ struct winsize ws;
memset(&ws, 0, sizeof(ws));
ws.ws_row = vc->vc_rows;
ws.ws_col = vc->vc_cols;
ws.ws_ypixel = vc->vc_scan_lines;
-
- spin_lock_irq(&vc->vc_tty->ctrl_lock);
- if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col))
- pgrp = get_pid(vc->vc_tty->pgrp);
- spin_unlock_irq(&vc->vc_tty->ctrl_lock);
- if (pgrp) {
- kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1);
- put_pid(pgrp);
- }
- *cws = ws;
+ tty_do_resize(tty, real_tty, &ws);
}
if (CON_IS_VISIBLE(vc))
@@ -932,14 +941,47 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
return err;
}
-int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
+/**
+ * vc_resize - resize a VT
+ * @vc: virtual console
+ * @cols: columns
+ * @rows: rows
+ *
+ * Resize a virtual console as seen from the console end of things. We
+ * use the common vc_do_resize methods to update the structures. The
+ * caller must hold the console sem to protect console internals and
+ * vc->vc_tty
+ */
+
+int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
+{
+ return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows);
+}
+
+/**
+ * vt_resize - resize a VT
+ * @tty: tty to resize
+ * @real_tty: tty if a pty/tty pair
+ * @ws: winsize attributes
+ *
+ * Resize a virtual terminal. This is called by the tty layer as we
+ * register our own handler for resizing. The common helper does all
+ * the actual work.
+ *
+ * Takes the console sem and the called methods then take the tty
+ * termios_mutex and the tty ctrl_lock in that order.
+ */
+
+int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct winsize *ws)
{
- int rc;
+ struct vc_data *vc = tty->driver_data;
+ int ret;
acquire_console_sem();
- rc = vc_resize(vc, cols, lines);
+ ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row);
release_console_sem();
- return rc;
+ return ret;
}
void vc_deallocate(unsigned int currcons)
@@ -2907,6 +2949,7 @@ static const struct tty_operations con_ops = {
.start = con_start,
.throttle = con_throttle,
.unthrottle = con_unthrottle,
+ .resize = vt_resize,
};
int __init vty_init(void)
@@ -4061,7 +4104,6 @@ EXPORT_SYMBOL(default_blu);
EXPORT_SYMBOL(update_region);
EXPORT_SYMBOL(redraw_screen);
EXPORT_SYMBOL(vc_resize);
-EXPORT_SYMBOL(vc_lock_resize);
EXPORT_SYMBOL(fg_console);
EXPORT_SYMBOL(console_blank_hook);
EXPORT_SYMBOL(console_blanked);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 3211afd..c904e9a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -947,14 +947,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
get_user(cc, &vtsizes->v_cols))
ret = -EFAULT;
else {
+ acquire_console_sem();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
vc = vc_cons[i].d;
if (vc) {
vc->vc_resize_user = 1;
- vc_lock_resize(vc_cons[i].d, cc, ll);
+ vc_resize(vc_cons[i].d, cc, ll);
}
}
+ release_console_sem();
}
break;
}
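
The vt_ioctl hunk above takes the console semaphore once around the whole loop instead of once per console through the removed vc_lock_resize() wrapper, which both cuts lock traffic and keeps the console set stable for the duration of the walk. A kernel-style sketch of the same batching; the function name is hypothetical and not part of this patch:

#include <linux/console.h>
#include <linux/console_struct.h>
#include <linux/vt_kern.h>

/* hypothetical: resize every allocated console under one console-sem hold */
static void hypothetical_resize_all(unsigned int cols, unsigned int rows)
{
	int i;

	acquire_console_sem();
	for (i = 0; i < MAX_NR_CONSOLES; i++) {
		struct vc_data *vc = vc_cons[i].d;

		if (vc)
			vc_resize(vc, cols, rows); /* caller already holds the sem */
	}
	release_console_sem();
}
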
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
index c5b1840..8b0252b 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.h
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -38,7 +38,6 @@
#include <linux/types.h>
#include <linux/cdev.h>
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <asm/io.h>
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
index ffabd3b..62bda45 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.h
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -38,7 +38,6 @@
#include <linux/types.h>
#include <linux/cdev.h>
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <asm/io.h>
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 8bfee5f..278c985 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -74,7 +74,6 @@
* currently programmed in the FPGA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 1f9c8b0..24d0d9b 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -38,7 +38,6 @@
#include <linux/types.h>
#include <linux/cdev.h>
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <asm/io.h>
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ba7b9a6..a4bec3f 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
+ int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
if (unlikely(!ldev))
return 0;
+	/* Special case when the user has set a very strict latency requirement */
+ if (unlikely(latency_req == 0)) {
+ ladder_do_selection(ldev, last_idx, 0);
+ return 0;
+ }
+
last_state = &ldev->states[last_idx];
if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
- dev->states[last_idx + 1].exit_latency <=
- pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+ dev->states[last_idx + 1].exit_latency <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
}
/* consider demotion */
- if (last_idx > 0 &&
+ if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ dev->states[last_idx].exit_latency > latency_req) {
+ int i;
+
+ for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+ if (dev->states[i].exit_latency <= latency_req)
+ break;
+ }
+ ladder_do_selection(ldev, last_idx, i);
+ return i;
+ }
+
+ if (last_idx > CPUIDLE_DRIVER_STATE_START &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = 0;
+ ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
for (i = 0; i < dev->state_count; i++) {
state = &dev->states[i];
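
Both governor hunks (ladder above, menu below) now read the PM QoS CPU/DMA latency bound once per invocation and special-case a bound of zero, meaning the user tolerates no added wakeup latency at all. A simplified sketch of picking the deepest state allowed by that bound, assuming states are ordered shallow to deep; the helper is hypothetical and deliberately ignores the governors' residency heuristics:

#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>

static int deepest_allowed_state(struct cpuidle_device *dev)
{
	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
	int i, best = CPUIDLE_DRIVER_STATE_START;

	/* a zero bound means "no added latency": stay in the shallowest state */
	if (latency_req == 0)
		return 0;

	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
		if (dev->states[i].exit_latency <= latency_req)
			best = i;	/* deepest state still within the bound */

	return best;
}
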
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5..8d7cf3f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
+ int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
int i;
+	/* Special case when the user has set a very strict latency requirement */
+ if (unlikely(latency_req == 0)) {
+ data->last_state_idx = 0;
+ return 0;
+ }
+
/* determine the expected residency time */
data->expected_us =
(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
/* find the deepest idle state that satisfies our constraints */
- for (i = 1; i < dev->state_count; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
if (s->target_residency > data->expected_us)
break;
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+ if (s->exit_latency > latency_req)
break;
}
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
- unsigned int measured_us =
- cpuidle_get_last_residency(dev) + data->elapsed_us;
+ unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &dev->states[last_idx];
+ unsigned int measured_us;
/*
* Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
* for one full standard timer tick. However, be aware that this
* could potentially result in a suboptimal state transition.
*/
- if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
- measured_us = USEC_PER_SEC / HZ;
+ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+ last_idle_us = USEC_PER_SEC / HZ;
+
+ /*
+ * measured_us and elapsed_us are the cumulative idle time, since the
+ * last time we were woken out of idle by an interrupt.
+ */
+ if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+ measured_us = data->elapsed_us + last_idle_us;
+ else
+ measured_us = -1;
+
+ /* Predict time until next break event */
+ data->predicted_us = max(measured_us, data->last_measured_us);
- /* Predict time remaining until next break event */
- if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
- data->predicted_us = max(measured_us, data->last_measured_us);
+ if (last_idle_us + BREAK_FUZZ <
+ data->expected_us - target->exit_latency) {
data->last_measured_us = measured_us;
data->elapsed_us = 0;
} else {
- if (data->elapsed_us < data->elapsed_us + measured_us)
- data->elapsed_us = measured_us;
- else
- data->elapsed_us = -1;
- data->predicted_us = max(measured_us, data->last_measured_us);
+ data->elapsed_us = measured_us;
}
}
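
The menu_reflect() rewrite above accumulates idle time with an explicit wrap check: if adding the latest residency would overflow elapsed_us, the sum saturates instead of wrapping, so predicted_us never collapses to a tiny value. The test relies on the fact that an unsigned sum is smaller than either operand exactly when it wrapped; a minimal standalone sketch with a hypothetical helper name:

#include <linux/kernel.h>	/* UINT_MAX */

/* hypothetical helper: add two unsigned counters, saturating at UINT_MAX */
static inline unsigned int saturating_add(unsigned int a, unsigned int b)
{
	unsigned int sum = a + b;

	return (sum < a) ? UINT_MAX : sum;	/* sum < a iff the add wrapped */
}
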
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 31a0e0b..97b0038 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -21,8 +21,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-static ssize_t show_available_governors(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_available_governors(struct sysdev_class *class,
+ char *buf)
{
ssize_t i = 0;
struct cpuidle_governor *tmp;
@@ -40,8 +40,8 @@ out:
return i;
}
-static ssize_t show_current_driver(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_current_driver(struct sysdev_class *class,
+ char *buf)
{
ssize_t ret;
@@ -55,8 +55,8 @@ static ssize_t show_current_driver(struct sys_device *dev,
return ret;
}
-static ssize_t show_current_governor(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_current_governor(struct sysdev_class *class,
+ char *buf)
{
ssize_t ret;
@@ -70,9 +70,8 @@ static ssize_t show_current_governor(struct sys_device *dev,
return ret;
}
-static ssize_t store_current_governor(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_current_governor(struct sysdev_class *class,
+ const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
int ret = -EINVAL;
@@ -104,8 +103,9 @@ static ssize_t store_current_governor(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
+static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
+static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
+ NULL);
static struct attribute *cpuclass_default_attrs[] = {
&attr_current_driver.attr,
@@ -113,9 +113,10 @@ static struct attribute *cpuclass_default_attrs[] = {
NULL
};
-static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL);
-static SYSDEV_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
+ NULL);
+static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
+ store_current_governor);
static struct attribute *cpuclass_switch_attrs[] = {
&attr_available_governors.attr,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a16..bf2917d 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
+#include <asm/i387.h>
#include "padlock.h"
/* Control word. */
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
asm volatile ("pushfl; popfl");
}
+/*
+ * While the padlock instructions don't use FP/SSE registers, they
+ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
+ * should be used only inside the irq_ts_save/restore() context.
+ */
+
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
void *control_word)
{
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
+ int ts_state;
padlock_reset_key();
+
+ ts_state = irq_ts_save();
aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ irq_ts_restore(ts_state);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
+ int ts_state;
padlock_reset_key();
+
+ ts_state = irq_ts_save();
aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ irq_ts_restore(ts_state);
}
static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
-
+ irq_ts_restore(ts_state);
return err;
}
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ int ts_state;
padlock_reset_key();
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ irq_ts_restore(ts_state);
return err;
}
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680..a7fbade 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
+#include <asm/i387.h>
#include "padlock.h"
#define SHA1_DEFAULT_FALLBACK "sha1-generic"
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
+ int ts_state;
((uint32_t *)result)[0] = SHA1_H0;
((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
((uint32_t *)result)[3] = SHA1_H3;
((uint32_t *)result)[4] = SHA1_H4;
+ /* prevent taking the spurious DNA fault with padlock. */
+ ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
+ irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
+ int ts_state;
((uint32_t *)result)[0] = SHA256_H0;
((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
((uint32_t *)result)[6] = SHA256_H6;
((uint32_t *)result)[7] = SHA256_H7;
+ /* prevent taking the spurious DNA fault with padlock. */
+ ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
+ irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f..ee827a7 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@ struct talitos_private {
unsigned int exec_units;
unsigned int desc_types;
+ /* SEC Compatibility info */
+ unsigned long features;
+
/* next channel to be assigned next incoming descriptor */
atomic_t last_chan;
@@ -133,6 +136,9 @@ struct talitos_private {
struct hwrng rng;
};
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
/* copy the generated ICV to dst */
if (edesc->dma_len) {
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
icvdata, ctx->authsize);
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev,
/* auth check */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
+ struct talitos_ptr *link_tbl_ptr =
+ &edesc->link_tbl[sg_count-1];
+ struct scatterlist *sg;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+ /* If necessary for this SEC revision,
+ * add a link table entry for ICV.
+ */
+ if ((priv->features &
+ TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
+ (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+ link_tbl_ptr->j_extent = 0;
+ link_tbl_ptr++;
+ link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+ link_tbl_ptr->len = cpu_to_be16(authsize);
+ sg = sg_last(areq->src, edesc->src_nents ? : 1);
+ link_tbl_ptr->ptr = cpu_to_be32(
+ (char *)sg_dma_address(sg)
+ + sg->length - authsize);
+ }
} else {
/* Only one segment now, so no link tbl needed */
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
- &edesc->link_tbl[edesc->src_nents];
- struct scatterlist *sg;
+ &edesc->link_tbl[edesc->src_nents + 1];
desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
- edesc->src_nents);
+ edesc->src_nents + 1);
if (areq->src == areq->dst) {
memcpy(link_tbl_ptr, &edesc->link_tbl[0],
edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
}
+ /* Add an entry to the link table for ICV data */
link_tbl_ptr += sg_count - 1;
-
- /* handle case where sg_last contains the ICV exclusively */
- sg = sg_last(areq->dst, edesc->dst_nents);
- if (sg->length == ctx->authsize)
- link_tbl_ptr--;
-
link_tbl_ptr->j_extent = 0;
+ sg_count++;
link_tbl_ptr++;
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents +
- edesc->dst_nents + 1);
+ edesc->dst_nents + 2);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
/*
* allocate space for base edesc plus the link tables,
- * allowing for a separate entry for the generated ICV (+ 1),
+ * allowing for two separate entries for ICV and generated ICV (+ 2),
* and the ICV data itself
*/
alloc_len = sizeof(struct ipsec_esp_edesc);
if (src_nents || dst_nents) {
- dma_len = (src_nents + dst_nents + 1) *
+ dma_len = (src_nents + dst_nents + 2) *
sizeof(struct talitos_ptr) + ctx->authsize;
alloc_len += dma_len;
} else {
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 1];
+ edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
+ if (of_device_is_compatible(np, "fsl,sec3.0"))
+ priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
+
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a4e4494..0328da0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
-#include <asm/plat-orion/mv_xor.h>
+#include <plat/mv_xor.h>
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index b27b13c..4b55ec6 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -34,7 +34,6 @@
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
-#include <linux/version.h>
#define EDAC_MC_LABEL_LEN 31
#define EDAC_DEVICE_NAME_LEN 31
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index fa6d6ab..4509024 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -12,8 +12,8 @@ config FIREWIRE
This is the "Juju" FireWire stack, a new alternative implementation
designed for robustness and simplicity. You can build either this
stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
- Please read http://wiki.linux1394.org/JujuMigration before you
- enable the new stack.
+ Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ before you enable the new stack.
To compile this driver as a module, say M here: the module will be
called firewire-core.
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 001622e..3bf8ee1 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -84,20 +84,23 @@ static struct kobj_type memmap_ktype = {
*/
/*
- * Firmware memory map entries
+ * Firmware memory map entries. No locking is needed because the
+ * firmware_map_add() and firmware_map_add_early() functions are called
+ * in firmware initialisation code in a single thread of execution.
*/
static LIST_HEAD(map_entries);
/**
- * Common implementation of firmware_map_add() and firmware_map_add_early()
- * which expects a pre-allocated struct firmware_map_entry.
- *
+ * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
- */
+ *
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ **/
static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
const char *type,
struct firmware_map_entry *entry)
@@ -115,33 +118,52 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
return 0;
}
-/*
- * See <linux/firmware-map.h> for documentation.
- */
+/**
+ * firmware_map_add() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (inclusive).
+ * @type: Type of the memory range.
+ *
+ * This function uses kmalloc() for memory
+ * allocation. Use firmware_map_add_early() if you want to use the bootmem
+ * allocator.
+ *
+ * This function must be called before late_initcall.
+ *
+ * Returns 0 on success, or -ENOMEM if no memory could be allocated.
+ **/
int firmware_map_add(resource_size_t start, resource_size_t end,
const char *type)
{
struct firmware_map_entry *entry;
entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
- WARN_ON(!entry);
if (!entry)
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
}
-/*
- * See <linux/firmware-map.h> for documentation.
- */
+/**
+ * firmware_map_add_early() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (inclusive).
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function uses the bootmem allocator
+ * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
+ *
+ * This function must be called before late_initcall.
+ *
+ * Returns 0 on success, or -ENOMEM if no memory could be allocated.
+ **/
int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
const char *type)
{
struct firmware_map_entry *entry;
entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
- WARN_ON(!entry);
- if (!entry)
+ if (WARN_ON(!entry))
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
@@ -183,7 +205,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj,
/*
* Initialises stuff and adds the entries in the map_entries list to
* sysfs. Important is that firmware_map_add() and firmware_map_add_early()
- * must be called before late_initcall.
+ * must be called before late_initcall. That is because this function is
+ * itself run as a late_initcall(), which means that if you call
+ * firmware_map_add() or firmware_map_add_early() afterwards, the entries
+ * are not added to sysfs.
*/
static int __init memmap_init(void)
{
@@ -192,13 +217,13 @@ static int __init memmap_init(void)
struct kset *memmap_kset;
memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
- WARN_ON(!memmap_kset);
- if (!memmap_kset)
+ if (WARN_ON(!memmap_kset))
return -ENOMEM;
list_for_each_entry(entry, &map_entries, list) {
entry->kobj.kset = memmap_kset;
- kobject_add(&entry->kobj, NULL, "%d", i++);
+ if (kobject_add(&entry->kobj, NULL, "%d", i++))
+ kobject_put(&entry->kobj);
}
return 0;
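
memmap_init() above now checks the kobject_add() return value and drops the reference with kobject_put() on failure instead of silently continuing, and the standalone WARN_ON()s are folded into the error branches since WARN_ON() returns the condition it tested. A minimal sketch of the add-or-put pattern, with hypothetical names and a kobject assumed to be already initialised with its ktype:

#include <linux/kobject.h>

struct hypothetical_entry {
	struct kobject kobj;	/* assumed kobject_init()ed by the caller */
};

static int hypothetical_publish(struct kset *kset,
				struct hypothetical_entry *e, int nr)
{
	int err;

	e->kobj.kset = kset;
	err = kobject_add(&e->kobj, NULL, "%d", nr);
	if (err)
		kobject_put(&e->kobj);	/* drop the reference on failure */
	return err;
}
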
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 089c015..53f0e5a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -400,27 +400,31 @@ static void drm_locked_tasklet_func(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
unsigned long irqflags;
-
+ void (*tasklet_func)(struct drm_device *);
+
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ tasklet_func = dev->locked_tasklet_func;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
- if (!dev->locked_tasklet_func ||
+ if (!tasklet_func ||
!drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
- spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
}
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- dev->locked_tasklet_func(dev);
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ tasklet_func = dev->locked_tasklet_func;
+ dev->locked_tasklet_func = NULL;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+ if (tasklet_func != NULL)
+ tasklet_func(dev);
drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
-
- dev->locked_tasklet_func = NULL;
-
- spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}
/**
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 0998723..a4caf95 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -105,14 +105,19 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
ret ? "interrupted" : "has lock");
if (ret) return ret;
- sigemptyset(&dev->sigmask);
- sigaddset(&dev->sigmask, SIGSTOP);
- sigaddset(&dev->sigmask, SIGTSTP);
- sigaddset(&dev->sigmask, SIGTTIN);
- sigaddset(&dev->sigmask, SIGTTOU);
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = dev->lock.hw_lock;
- block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	/* don't block all signals on the master process for now;
+	 * this is really probably not the correct answer, but it lets
+	 * us debug the xkb X server problem for now */
+ if (!file_priv->master) {
+ sigemptyset(&dev->sigmask);
+ sigaddset(&dev->sigmask, SIGSTOP);
+ sigaddset(&dev->sigmask, SIGTSTP);
+ sigaddset(&dev->sigmask, SIGTTIN);
+ sigaddset(&dev->sigmask, SIGTTOU);
+ dev->sigdata.context = lock->context;
+ dev->sigdata.lock = dev->lock.hw_lock;
+ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+ }
if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
dev->driver->dma_ready(dev);
@@ -150,6 +155,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
unsigned long irqflags;
+ void (*tasklet_func)(struct drm_device *);
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -158,14 +164,11 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-
- if (dev->locked_tasklet_func) {
- dev->locked_tasklet_func(dev);
-
- dev->locked_tasklet_func = NULL;
- }
-
+ tasklet_func = dev->locked_tasklet_func;
+ dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+ if (tasklet_func != NULL)
+ tasklet_func(dev);
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
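
Both DRM hunks above use the same discipline for the locked tasklet: snapshot the function pointer and clear it while holding tasklet_lock, then drop the lock before invoking it, so the callback can take other locks without deadlocking and cannot run twice. A minimal standalone sketch of the pattern, with hypothetical names:

#include <linux/spinlock.h>

struct hypothetical_dev {
	spinlock_t lock;
	void (*deferred_func)(struct hypothetical_dev *dev);
};

static void hypothetical_run_deferred(struct hypothetical_dev *dev)
{
	void (*func)(struct hypothetical_dev *dev);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	func = dev->deferred_func;	/* snapshot ... */
	dev->deferred_func = NULL;	/* ... and claim it atomically */
	spin_unlock_irqrestore(&dev->lock, flags);

	if (func)
		func(dev);	/* runs lock-free, exactly once */
}
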
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 702df45..4b27d9a 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -77,6 +77,9 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
return -EFAULT;
}
+ box.x2--; /* Hardware expects inclusive bottom-right corner */
+ box.y2--;
+
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) &
R300_CLIPRECT_MASK;
@@ -95,8 +98,8 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
-
}
+
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
@@ -136,6 +139,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
ADVANCE_RING();
}
+	/* flush cache and wait for idle clean after cliprect change */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ ADVANCE_RING();
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
return 0;
}
@@ -166,13 +181,13 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(0x21DC, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(R300_VAP_CLIP_X_0, 4);
- ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+ ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
- ADD_RANGE(R300_TX_CNTL, 1);
+ ADD_RANGE(R300_TX_INVALTAGS, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
@@ -388,15 +403,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (sz * 16 > cmdbuf->bufsz)
return -EINVAL;
- BEGIN_RING(5 + sz * 4);
- /* Wait for VAP to come to senses.. */
- /* there is no need to emit it multiple times, (only once before VAP is programmed,
- but this optimization is for later */
- OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+ /* VAP is very sensitive so we purge cache before we program it
+ * and we also flush its state before & after */
+ BEGIN_RING(6);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+ BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+ ADVANCE_RING();
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+ OUT_RING(0);
ADVANCE_RING();
cmdbuf->buf += sz * 16;
@@ -424,6 +452,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
ADVANCE_RING();
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4;
@@ -543,22 +580,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
return 0;
}
-static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
- drm_radeon_kcmd_buffer_t *cmdbuf)
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
- int count, ret;
+ u32 *cmd;
+ int count;
+ int expected_count;
RING_LOCALS;
- count=(cmd[0]>>16) & 0x3fff;
+ cmd = (u32 *) cmdbuf->buf;
+ count = (cmd[0]>>16) & 0x3fff;
+ expected_count = cmd[1] >> 16;
+ if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ expected_count = (expected_count+1)/2;
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
- return -EINVAL;
- }
- ret = !radeon_check_offset(dev_priv, cmd[2]);
- if (ret) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ if (count && count != expected_count) {
+ DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+ count, expected_count);
return -EINVAL;
}
@@ -570,6 +608,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
+ if (!count) {
+ drm_r300_cmd_header_t header;
+
+ if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+ return -EINVAL;
+ }
+
+ header.u = *(unsigned int *)cmdbuf->buf;
+
+ cmdbuf->buf += sizeof(header);
+ cmdbuf->bufsz -= sizeof(header);
+ cmd = (u32 *) cmdbuf->buf;
+
+ if (header.header.cmd_type != R300_CMD_PACKET3 ||
+ header.packet3.packet != R300_CMD_PACKET3_RAW ||
+ cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+ return -EINVAL;
+ }
+
+ if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ return -EINVAL;
+ }
+ if (!radeon_check_offset(dev_priv, cmd[2])) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ return -EINVAL;
+ }
+ if (cmd[3] != expected_count) {
+ DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+ cmd[3], expected_count);
+ return -EINVAL;
+ }
+
+ BEGIN_RING(4);
+ OUT_RING(cmd[0]);
+ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+ ADVANCE_RING();
+
+ cmdbuf->buf += 4*4;
+ cmdbuf->bufsz -= 4*4;
+ }
+
return 0;
}
@@ -613,11 +695,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
- case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
- return r300_emit_indx_buffer(dev_priv, cmdbuf);
- case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
- case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
- case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
+ case RADEON_CP_INDX_BUFFER:
+ DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+ return -EINVAL;
+ case RADEON_CP_3D_DRAW_IMMD_2:
+ /* triggers drawing using in-packet vertex data */
+ case RADEON_CP_3D_DRAW_VBUF_2:
+ /* triggers drawing of vertex buffers setup elsewhere */
+ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+ RADEON_PURGE_EMITED);
+ break;
+ case RADEON_CP_3D_DRAW_INDX_2:
+ /* triggers drawing using indices to vertex buffer */
+ /* whenever we send vertex we clear flush & purge */
+ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+ RADEON_PURGE_EMITED);
+ return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
@@ -713,17 +806,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
*/
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
+ uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
- BEGIN_RING(6);
- OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+ cache_z = R300_ZC_FLUSH;
+ cache_2d = R300_RB2D_DC_FLUSH;
+ cache_3d = R300_RB3D_DC_FLUSH;
+ if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge; primitives were drawn since the last purge */
+ cache_z |= R300_ZC_FREE;
+ cache_2d |= R300_RB2D_DC_FREE;
+ cache_3d |= R300_RB3D_DC_FREE;
+ }
+
+ /* flush & purge zbuffer */
+ BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
- OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
- R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
- OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
- OUT_RING(0x0);
+ OUT_RING(cache_z);
+ ADVANCE_RING();
+ /* flush & purge 3d */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(cache_3d);
+ ADVANCE_RING();
+ /* flush & purge texture */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ /* FIXME: is this one really needed ? */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* flush & purge 2d through E2 as RB2D will trigger lockup */
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(cache_2d);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_HOST_IDLECLEAN);
ADVANCE_RING();
+ /* set flush & purge flags */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
/**
@@ -905,8 +1034,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
DRM_DEBUG("\n");
- /* See the comment above r300_emit_begin3d for why this call must be here,
- * and what the cleanup gotos are for. */
+ /* pacify */
r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
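
r300_emit_draw_indx_2() above cross-checks the packet's dword count against the index count encoded in the VAP_VF_CNTL word: 32-bit indices take one dword each, while 16-bit indices pack two per dword, hence the (n + 1) / 2 rounding. A small sketch of just that size calculation; the helper name is hypothetical and not part of this patch:

/* hypothetical helper: ring dwords occupied by nr_indices indices */
static inline unsigned int indx2_payload_dwords(unsigned int nr_indices,
						int indices_are_32bit)
{
	if (indices_are_32bit)
		return nr_indices;		/* one index per dword */
	return (nr_indices + 1) / 2;		/* two 16-bit indices per dword */
}
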
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index a6802f2..ee6f811 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -317,7 +317,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
* avoids bugs caused by still running shaders reading bad data from memory.
*/
-#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
+#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284
/* Absolutely no clue what this register is about. */
#define R300_VAP_UNKNOWN_2288 0x2288
@@ -513,7 +513,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */
/* Zero to flush caches. */
-#define R300_TX_CNTL 0x4100
+#define R300_TX_INVALTAGS 0x4100
#define R300_TX_FLUSH 0x0
/* The upper enable bits are guessed, based on fglrx reported limits. */
@@ -1362,6 +1362,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
+#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */
/* Guess by Vladimir.
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index f0de81a..3331f88 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -40,6 +40,7 @@
#define RADEON_FIFO_DEBUG 0
static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
@@ -198,23 +199,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
DRM_UDELAY(1);
}
} else {
- /* 3D */
- tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
- tmp |= RADEON_RB3D_DC_FLUSH_ALL;
- RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
-
- /* 2D */
- tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT);
- tmp |= RADEON_RB3D_DC_FLUSH_ALL;
- RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp);
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT)
- & RADEON_RB3D_DC_BUSY)) {
- return 0;
- }
- DRM_UDELAY(1);
- }
+ /* don't flush or purge cache here or lockup */
+ return 0;
}
#if RADEON_FIFO_DEBUG
@@ -237,6 +223,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
return 0;
DRM_UDELAY(1);
}
+	DRM_INFO("wait for fifo failed, status: 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
@@ -263,6 +252,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
}
DRM_UDELAY(1);
}
+	DRM_INFO("wait idle failed, status: 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
#if RADEON_FIFO_DEBUG
DRM_ERROR("failed!\n");
@@ -443,14 +435,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
dev_priv->cp_running = 1;
- BEGIN_RING(6);
-
+ BEGIN_RING(8);
+	/* isync can only be written through cp on r5xx; write it here */
+ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+ OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
RADEON_PURGE_CACHE();
RADEON_PURGE_ZCACHE();
RADEON_WAIT_UNTIL_IDLE();
-
ADVANCE_RING();
COMMIT_RING();
+
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
/* Reset the Command Processor. This will not flush any pending
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3f0eca9..0993816 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -220,6 +220,9 @@ struct radeon_virt_surface {
struct drm_file *file_priv;
};
+#define RADEON_FLUSH_EMITED (1 << 0)
+#define RADEON_PURGE_EMITED (1 << 1)
+
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
drm_radeon_sarea_t *sarea_priv;
@@ -311,6 +314,7 @@ typedef struct drm_radeon_private {
unsigned long fb_aper_offset;
int num_gb_pipes;
+ int track_flush;
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@@ -693,7 +697,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZC_FLUSH (1 << 0)
# define R300_ZC_FREE (1 << 1)
-# define R300_ZC_FLUSH_ALL 0x3
# define R300_ZC_BUSY (1 << 31)
#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
# define RADEON_RB3D_DC_FLUSH (3 << 0)
@@ -701,6 +704,8 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_RB3D_DC_FLUSH_ALL 0xf
# define RADEON_RB3D_DC_BUSY (1 << 31)
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
+# define R300_RB3D_DC_FLUSH (2 << 0)
+# define R300_RB3D_DC_FREE (2 << 2)
# define R300_RB3D_DC_FINISH (1 << 4)
#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
# define RADEON_Z_TEST_MASK (7 << 4)
@@ -1246,17 +1251,17 @@ do { \
OUT_RING(RADEON_RB3D_DC_FLUSH); \
} else { \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
- OUT_RING(RADEON_RB3D_DC_FLUSH); \
+ OUT_RING(R300_RB3D_DC_FLUSH); \
} \
} while (0)
#define RADEON_PURGE_CACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
- OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
+ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
} else { \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
- OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
+ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \
} \
} while (0)
@@ -1273,10 +1278,10 @@ do { \
#define RADEON_PURGE_ZCACHE() do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
- OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL); \
+ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
} else { \
- OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
- OUT_RING(R300_ZC_FLUSH_ALL); \
+ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
} \
} while (0)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 61e78a4..b15f882 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -654,12 +654,12 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
- { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bf4ebfb..d402e8d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -77,6 +77,22 @@ config SENSORS_AD7418
This driver can also be built as a module. If so, the module
will be called ad7418.
+config SENSORS_ADCXX
+ tristate "National Semiconductor ADCxxxSxxx"
+ depends on SPI_MASTER && EXPERIMENTAL
+ help
+ If you say yes here you get support for the National Semiconductor
+ ADC<bb><c>S<sss> chip family, where
+ * bb is the resolution in number of bits (8, 10, 12)
+ * c is the number of channels (1, 2, 4, 8)
+ * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
+ kSPS and 101 for 1 MSPS)
+
+	  Examples: ADC081S101, ADC124S501, ...
+
+ This driver can also be built as a module. If so, the module
+ will be called adcxx.
+
config SENSORS_ADM1021
tristate "Analog Devices ADM1021 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 7943e5c..950134a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
+obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index f00f497..d568c65 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1,5 +1,8 @@
/*
- abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl>
+ abituguru3.c
+
+ Copyright (c) 2006-2008 Hans de Goede <j.w.r.degoede@hhs.nl>
+ Copyright (c) 2008 Alistair John Strachan <alistair@devzero.co.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -116,7 +119,7 @@ struct abituguru3_sensor_info {
struct abituguru3_motherboard_info {
u16 id;
- const char *name;
+ const char *dmi_name;
/* + 1 -> end of sensors indicated by a sensor with name == NULL */
struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1];
};
@@ -161,7 +164,7 @@ struct abituguru3_data {
/* Constants */
static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
- { 0x000C, "unknown", {
+ { 0x000C, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -183,7 +186,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX1 Fan", 35, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000D, "Abit AW8", {
+ { 0x000D, NULL /* Abit AW8, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -212,7 +215,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000E, "AL-8", {
+ { 0x000E, NULL /* AL-8, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -233,7 +236,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x000F, "unknown", {
+ { 0x000F, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -254,7 +257,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0010, "Abit NI8 SLI GR", {
+ { 0x0010, NULL /* Abit NI8 SLI GR, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -276,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "OTES1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0011, "Abit AT8 32X", {
+ { 0x0011, NULL /* Abit AT8 32X, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -302,7 +305,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX2 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0012, "Abit AN8 32X", {
+ { 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -324,7 +327,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0013, "Abit AW8D", {
+ { 0x0013, NULL /* Abit AW8D, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -353,7 +356,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0014, "Abit AB9 Pro", {
+ { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -374,7 +377,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0015, "unknown", {
+ { 0x0015, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -398,7 +401,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0016, "AW9D-MAX", {
+ { 0x0016, NULL /* AW9D-MAX, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -426,7 +429,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "OTES1 Fan", 38, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0017, "unknown", {
+ { 0x0017, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -451,7 +454,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 FAN", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0018, "unknown", {
+ { 0x0018, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -478,7 +481,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0019, "unknown", {
+ { 0x0019, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 7, 0, 10, 1, 0 },
{ "DDR2", 13, 0, 20, 1, 0 },
{ "DDR2 VTT", 14, 0, 10, 1, 0 },
@@ -505,7 +508,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 FAN", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001A, "Abit IP35 Pro", {
+ { 0x001A, "IP35 Pro(Intel P35-ICH9R)", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -533,7 +536,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX4 Fan", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001B, "unknown", {
+ { 0x001B, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR3", 1, 0, 20, 1, 0 },
{ "DDR3 VTT", 2, 0, 10, 1, 0 },
@@ -560,7 +563,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001C, "unknown", {
+ { 0x001C, NULL /* Unknown, need DMI string */, {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -935,9 +938,18 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
goto abituguru3_probe_error;
}
data->sensors = abituguru3_motherboards[i].sensors;
+
printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
- "ID: %04X (%s)\n", (unsigned int)id,
- abituguru3_motherboards[i].name);
+ "ID: %04X\n", (unsigned int)id);
+
+#ifdef CONFIG_DMI
+ if (!abituguru3_motherboards[i].dmi_name) {
+ printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
+ "not detected using DMI. Please send the output of "
+ "\"dmidecode\" to the abituguru3 maintainer"
+ "(see MAINTAINERS)\n");
+ }
+#endif
/* Fill the sysfs attr array */
sysfs_attr_i = 0;
@@ -1109,6 +1121,46 @@ static struct platform_driver abituguru3_driver = {
.resume = abituguru3_resume
};
+#ifdef CONFIG_DMI
+
+static int __init abituguru3_dmi_detect(void)
+{
+ const char *board_vendor, *board_name;
+ int i, err = (force) ? 1 : -ENODEV;
+
+ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))
+ return err;
+
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board_name)
+ return err;
+
+ for (i = 0; abituguru3_motherboards[i].id; i++) {
+ const char *dmi_name = abituguru3_motherboards[i].dmi_name;
+ if (dmi_name && !strcmp(dmi_name, board_name))
+ break;
+ }
+
+ if (!abituguru3_motherboards[i].id)
+ return 1;
+
+ return 0;
+}
+
+#else /* !CONFIG_DMI */
+
+static inline int abituguru3_dmi_detect(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_DMI */
+
+/* FIXME: Manual detection should die eventually; we need to collect stable
+ * DMI model names first before we can rely entirely on CONFIG_DMI.
+ */
+
static int __init abituguru3_detect(void)
{
/* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
@@ -1119,7 +1171,7 @@ static int __init abituguru3_detect(void)
if (((data_val == 0x00) || (data_val == 0x08)) &&
((cmd_val == 0xAC) || (cmd_val == 0x05) ||
(cmd_val == 0x55)))
- return ABIT_UGURU3_BASE;
+ return 0;
ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "
"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
@@ -1127,7 +1179,7 @@ static int __init abituguru3_detect(void)
if (force) {
printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
"present because of \"force\" parameter\n");
- return ABIT_UGURU3_BASE;
+ return 0;
}
/* No uGuru3 found */
@@ -1138,27 +1190,29 @@ static struct platform_device *abituguru3_pdev;
static int __init abituguru3_init(void)
{
- int address, err;
struct resource res = { .flags = IORESOURCE_IO };
-
-#ifdef CONFIG_DMI
- const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
-
- /* safety check, refuse to load on non Abit motherboards */
- if (!force && (!board_vendor ||
- strcmp(board_vendor, "http://www.abit.com.tw/")))
- return -ENODEV;
-#endif
-
- address = abituguru3_detect();
- if (address < 0)
- return address;
+ int err;
+
+ /* Attempt DMI detection first */
+ err = abituguru3_dmi_detect();
+ if (err < 0)
+ return err;
+
+ /* Fall back to manual detection if there was no exact
+ * board name match, or force was specified.
+ */
+ if (err > 0) {
+ err = abituguru3_detect();
+ if (err)
+ return err;
+ }
err = platform_driver_register(&abituguru3_driver);
if (err)
goto exit;
- abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address);
+ abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME,
+ ABIT_UGURU3_BASE);
if (!abituguru3_pdev) {
printk(KERN_ERR ABIT_UGURU3_NAME
": Device allocation failed\n");
@@ -1166,8 +1220,8 @@ static int __init abituguru3_init(void)
goto exit_driver_unregister;
}
- res.start = address;
- res.end = address + ABIT_UGURU3_REGION_LENGTH - 1;
+ res.start = ABIT_UGURU3_BASE;
+ res.end = ABIT_UGURU3_BASE + ABIT_UGURU3_REGION_LENGTH - 1;
res.name = ABIT_UGURU3_NAME;
err = platform_device_add_resources(abituguru3_pdev, &res, 1);
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
new file mode 100644
index 0000000..242294d
--- /dev/null
+++ b/drivers/hwmon/adcxx.c
@@ -0,0 +1,329 @@
+/*
+ * adcxx.c
+ *
+ * The adcxx4s is an AD converter family from National Semiconductor (NS).
+ *
+ * Copyright (c) 2008 Marc Pignat <marc.pignat@hevs.ch>
+ *
+ * The adcxx4s communicates with a host processor via an SPI/Microwire Bus
+ * interface. This driver supports the whole family of devices with name
+ * ADC<bb><c>S<sss>, where
+ * * bb is the resolution in number of bits (8, 10, 12)
+ * * c is the number of channels (1, 2, 4, 8)
+ * * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 kSPS
+ * and 101 for 1 MSPS)
+ *
+ * Complete datasheets are available at National's website here:
+ * http://www.national.com/ds/DC/ADC<bb><c>S<sss>.pdf
+ *
+ * Handling of 8, 10 and 12 bit converters is the same; the
+ * unavailable bits are 0 :)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+
+#define DRVNAME "adcxx"
+
+struct adcxx {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u32 channels;
+ u32 reference; /* in millivolts */
+};
+
+/* sysfs hook function */
+static ssize_t adcxx_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ u8 tx_buf[2] = { attr->index << 3 }; /* other bits are don't care */
+ u8 rx_buf[2];
+ int status;
+ int value;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
+ rx_buf, sizeof(rx_buf));
+ if (status < 0) {
+ dev_warn(dev, "spi_write_then_read failed with status %d\n",
+ status);
+ goto out;
+ }
+
+ value = (rx_buf[0] << 8) + rx_buf[1];
+ dev_dbg(dev, "raw value = 0x%x\n", value);
+
+ value = value * adc->reference >> 12;
+ status = sprintf(buf, "%d\n", value);
+out:
+ mutex_unlock(&adc->lock);
+ return status;
+}
+
+static ssize_t adcxx_show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ /* The minimum reference is 0 for this chip family */
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t adcxx_show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ u32 reference;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ reference = adc->reference;
+
+ mutex_unlock(&adc->lock);
+
+ return sprintf(buf, "%d\n", reference);
+}
+
+static ssize_t adcxx_set_max(struct device *dev,
+ struct device_attribute *devattr, const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 10, &value))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&adc->lock))
+ return -ERESTARTSYS;
+
+ adc->reference = value;
+
+ mutex_unlock(&adc->lock);
+
+ return count;
+}
+
+static ssize_t adcxx_show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+
+ return sprintf(buf, "adcxx%ds\n", adc->channels);
+}
+
+static struct sensor_device_attribute ad_input[] = {
+ SENSOR_ATTR(name, S_IRUGO, adcxx_show_name, NULL, 0),
+ SENSOR_ATTR(in_min, S_IRUGO, adcxx_show_min, NULL, 0),
+ SENSOR_ATTR(in_max, S_IWUSR | S_IRUGO, adcxx_show_max,
+ adcxx_set_max, 0),
+ SENSOR_ATTR(in0_input, S_IRUGO, adcxx_read, NULL, 0),
+ SENSOR_ATTR(in1_input, S_IRUGO, adcxx_read, NULL, 1),
+ SENSOR_ATTR(in2_input, S_IRUGO, adcxx_read, NULL, 2),
+ SENSOR_ATTR(in3_input, S_IRUGO, adcxx_read, NULL, 3),
+ SENSOR_ATTR(in4_input, S_IRUGO, adcxx_read, NULL, 4),
+ SENSOR_ATTR(in5_input, S_IRUGO, adcxx_read, NULL, 5),
+ SENSOR_ATTR(in6_input, S_IRUGO, adcxx_read, NULL, 6),
+ SENSOR_ATTR(in7_input, S_IRUGO, adcxx_read, NULL, 7),
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit adcxx_probe(struct spi_device *spi, int channels)
+{
+ struct adcxx *adc;
+ int status;
+ int i;
+
+ adc = kzalloc(sizeof *adc, GFP_KERNEL);
+ if (!adc)
+ return -ENOMEM;
+
+ /* set a default value for the reference */
+ adc->reference = 3300;
+ adc->channels = channels;
+ mutex_init(&adc->lock);
+
+ mutex_lock(&adc->lock);
+
+ dev_set_drvdata(&spi->dev, adc);
+
+ for (i = 0; i < 3 + adc->channels; i++) {
+ status = device_create_file(&spi->dev, &ad_input[i].dev_attr);
+ if (status) {
+ dev_err(&spi->dev, "device_create_file failed.\n");
+ goto out_err;
+ }
+ }
+
+ adc->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(adc->hwmon_dev)) {
+ dev_err(&spi->dev, "hwmon_device_register failed.\n");
+ status = PTR_ERR(adc->hwmon_dev);
+ goto out_err;
+ }
+
+ mutex_unlock(&adc->lock);
+ return 0;
+
+out_err:
+ for (i--; i >= 0; i--)
+ device_remove_file(&spi->dev, &ad_input[i].dev_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_unlock(&adc->lock);
+ kfree(adc);
+ return status;
+}
+
+static int __devinit adcxx1s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 1);
+}
+
+static int __devinit adcxx2s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 2);
+}
+
+static int __devinit adcxx4s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 4);
+}
+
+static int __devinit adcxx8s_probe(struct spi_device *spi)
+{
+ return adcxx_probe(spi, 8);
+}
+
+static int __devexit adcxx_remove(struct spi_device *spi)
+{
+ struct adcxx *adc = dev_get_drvdata(&spi->dev);
+ int i;
+
+ mutex_lock(&adc->lock);
+ hwmon_device_unregister(adc->hwmon_dev);
+ for (i = 0; i < 3 + adc->channels; i++)
+ device_remove_file(&spi->dev, &ad_input[i].dev_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_unlock(&adc->lock);
+ kfree(adc);
+
+ return 0;
+}
+
+static struct spi_driver adcxx1s_driver = {
+ .driver = {
+ .name = "adcxx1s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx1s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx2s_driver = {
+ .driver = {
+ .name = "adcxx2s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx2s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx4s_driver = {
+ .driver = {
+ .name = "adcxx4s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx4s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static struct spi_driver adcxx8s_driver = {
+ .driver = {
+ .name = "adcxx8s",
+ .owner = THIS_MODULE,
+ },
+ .probe = adcxx8s_probe,
+ .remove = __devexit_p(adcxx_remove),
+};
+
+static int __init init_adcxx(void)
+{
+ int status;
+ status = spi_register_driver(&adcxx1s_driver);
+ if (status)
+ goto reg_1_failed;
+
+ status = spi_register_driver(&adcxx2s_driver);
+ if (status)
+ goto reg_2_failed;
+
+ status = spi_register_driver(&adcxx4s_driver);
+ if (status)
+ goto reg_4_failed;
+
+ status = spi_register_driver(&adcxx8s_driver);
+ if (status)
+ goto reg_8_failed;
+
+ return status;
+
+reg_8_failed:
+ spi_unregister_driver(&adcxx4s_driver);
+reg_4_failed:
+ spi_unregister_driver(&adcxx2s_driver);
+reg_2_failed:
+ spi_unregister_driver(&adcxx1s_driver);
+reg_1_failed:
+ return status;
+}
+
+static void __exit exit_adcxx(void)
+{
+ spi_unregister_driver(&adcxx1s_driver);
+ spi_unregister_driver(&adcxx2s_driver);
+ spi_unregister_driver(&adcxx4s_driver);
+ spi_unregister_driver(&adcxx8s_driver);
+}
+
+module_init(init_adcxx);
+module_exit(exit_adcxx);
+
+MODULE_AUTHOR("Marc Pignat");
+MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("adcxx1s");
+MODULE_ALIAS("adcxx2s");
+MODULE_ALIAS("adcxx4s");
+MODULE_ALIAS("adcxx8s");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index aacc0c4..b06b8e0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -98,6 +98,12 @@ static const char* temperature_sensors_sets[][36] = {
"TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
"TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P",
"TM9S", "TN0H", "TS0C", NULL },
+/* Set 5: iMac */
+ { "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P",
+ "Tp0C", NULL },
+/* Set 6: Macbook3 set */
+ { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
+ "Th0S", "Th1H", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -1223,6 +1229,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 3 },
/* MacPro: temperature set 4 */
{ .accelerometer = 0, .light = 0, .temperature_set = 4 },
+/* iMac: temperature set 5 */
+ { .accelerometer = 0, .light = 0, .temperature_set = 5 },
+/* MacBook3: accelerometer and temperature set 6 */
+ { .accelerometer = 1, .light = 0, .temperature_set = 6 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1232,10 +1242,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
(void*)&applesmc_dmi_data[0]},
- { applesmc_dmi_match, "Apple MacBook", {
+ { applesmc_dmi_match, "Apple MacBook (v2)", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
(void*)&applesmc_dmi_data[1]},
+ { applesmc_dmi_match, "Apple MacBook (v3)", {
+ DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
+ (void*)&applesmc_dmi_data[6]},
{ applesmc_dmi_match, "Apple MacBook", {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
@@ -1248,6 +1262,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
(void*)&applesmc_dmi_data[4]},
+ { applesmc_dmi_match, "Apple iMac", {
+ DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
+ (void*)&applesmc_dmi_data[5]},
{ .ident = NULL }
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 70239ac..93c1722 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -413,10 +413,11 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
- /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */
+ /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */
if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
!((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
- (c->x86_model == 0x16) || (c->x86_model == 0x17))) {
+ (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
+ (c->x86_model == 0x1A))) {
/* supported CPU not found, but report the unknown
family 6 CPU */
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 7b0a32c..c54eff9 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -37,13 +37,21 @@
* For VRD 10.0 and up, "VRD x.y Design Guide",
* available at http://developer.intel.com/.
*
- * AMD NPT 0Fh (Athlon64 & Opteron), AMD Publication 32559,
+ * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26094.PDF
+ * Table 74. VID Code Voltages
+ * This corresponds to an arbitrary VRM code of 24 in the functions below.
+ * These CPU models (K8 revision <= E) have 5 VID pins. See also:
+ * Revision Guide for AMD Athlon 64 and AMD Opteron Processors, AMD Publication 25759,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25759.pdf
+ *
+ * AMD NPT Family 0Fh Processors, AMD Publication 32559,
* http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf
* Table 71. VID Code Voltages
- * AMD Opteron processors don't follow the Intel specifications.
- * I'm going to "make up" 2.4 as the spec number for the Opterons.
- * No good reason just a mnemonic for the 24x Opteron processor
- * series.
+ * This corresponds to an arbitrary VRM code of 25 in the functions below.
+ * These CPU models (K8 revision >= F) have 6 VID pins. See also:
+ * Revision Guide for AMD NPT Family 0Fh Processors, AMD Publication 33610,
+ * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
*
* The 17 specification is in fact Intel Mobile Voltage Positioning -
* (IMVP-II). You can find more information in the datasheet of Max1718
@@ -95,7 +103,12 @@ int vid_from_reg(int val, u8 vrm)
return 0;
return((1600000 - (val - 2) * 6250 + 500) / 1000);
- case 24: /* AMD NPT 0Fh (Athlon64 & Opteron) */
+ case 24: /* Athlon64 & Opteron */
+ val &= 0x1f;
+ if (val == 0x1f)
+ return 0;
+ /* fall through */
+ case 25: /* AMD NPT 0Fh */
val &= 0x3f;
return (val < 32) ? 1550 - 25 * val
: 775 - (25 * (val - 31)) / 2;
@@ -157,11 +170,16 @@ struct vrm_model {
#ifdef CONFIG_X86
-/* the stepping parameter is highest acceptable stepping for current line */
+/*
+ * The stepping parameter is highest acceptable stepping for current line.
+ * The model match must be exact for 4-bit values. For model values 0x10
+ * and above (extended model), all models below the parameter will match.
+ */
static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
- {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */
+ {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
+ {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
{X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
{X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
{X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
@@ -189,6 +207,8 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
if (vrm_models[i].vendor==vendor)
if ((vrm_models[i].eff_family==eff_family)
&& ((vrm_models[i].eff_model==eff_model) ||
+ (vrm_models[i].eff_model >= 0x10 &&
+ eff_model <= vrm_models[i].eff_model) ||
(vrm_models[i].eff_model==ANY)) &&
(eff_stepping <= vrm_models[i].eff_stepping))
return vrm_models[i].vrm_type;
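
The reworked AMD branches of vid_from_reg() above share one formula after masking; only the VID width and the all-ones "off" check differ between the 5-bit K8 case (VRM 24) and the 6-bit NPT case (VRM 25). A stand-alone copy of those branches, useful for checking a reading by hand (user-space C, not the kernel function itself):

/* Stand-alone copy of the AMD branches of vid_from_reg() above. */
#include <stdio.h>

static int amd_vid_mv(int val, int vrm)
{
	if (vrm == 24) {		/* K8 rev <= E: 5 VID pins */
		val &= 0x1f;
		if (val == 0x1f)
			return 0;	/* all-ones "off" code */
		/* fall through to the shared formula */
	}
	val &= 0x3f;			/* NPT 0Fh: 6 VID pins */
	return (val < 32) ? 1550 - 25 * val
			  : 775 - (25 * (val - 31)) / 2;
}

int main(void)
{
	printf("VRM 24, code 0x04 -> %d mV\n", amd_vid_mv(0x04, 24)); /* 1450 */
	printf("VRM 25, code 0x3E -> %d mV\n", amd_vid_mv(0x3e, 25)); /* 388  */
	return 0;
}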
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index f9e2ed6..2ede938 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -81,6 +81,8 @@ static unsigned long amb_reg_temp(unsigned int amb)
#define MAX_AMBS_PER_CHANNEL 16
#define MAX_AMBS (MAX_MEM_CHANNELS * \
MAX_AMBS_PER_CHANNEL)
+#define CHANNEL_SHIFT 4
+#define DIMM_MASK 0xF
/*
* Ugly hack: For some reason the highest bit is set if there
* are _any_ DIMMs in the channel. Attempting to read from
@@ -89,7 +91,7 @@ static unsigned long amb_reg_temp(unsigned int amb)
* might prevent us from seeing the 16th DIMM in the channel.
*/
#define REAL_MAX_AMBS_PER_CHANNEL 15
-#define KNOBS_PER_AMB 5
+#define KNOBS_PER_AMB 6
static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit)
{
@@ -238,6 +240,16 @@ static ssize_t show_amb_temp(struct device *dev,
500 * amb_read_byte(data, amb_reg_temp(attr->index)));
}
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT,
+ attr->index & DIMM_MASK);
+}
+
static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
{
int i, j, k, d = 0;
@@ -268,6 +280,20 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
continue;
d++;
+ /* sysfs label */
+ iattr = data->attrs + data->num_attrs;
+ snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
+ "temp%d_label", d);
+ iattr->s_attr.dev_attr.attr.name = iattr->name;
+ iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
+ iattr->s_attr.dev_attr.show = show_label;
+ iattr->s_attr.index = k;
+ res = device_create_file(&pdev->dev,
+ &iattr->s_attr.dev_attr);
+ if (res)
+ goto exit_remove;
+ data->num_attrs++;
+
/* Temperature sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
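
The new temp%d_label attribute above derives its text from a packed sensor index: the memory channel sits above CHANNEL_SHIFT and the DIMM number in the low nibble selected by DIMM_MASK. A small stand-alone illustration of that encoding (not driver code):

/* Illustration of the i5k_amb label index packing shown above. */
#include <stdio.h>

#define CHANNEL_SHIFT	4
#define DIMM_MASK	0xF

int main(void)
{
	unsigned int channel = 2, dimm = 5;
	unsigned int index = (channel << CHANNEL_SHIFT) | dimm;

	/* prints "index 0x25 -> Ch. 2 DIMM 5", matching show_label() */
	printf("index 0x%02X -> Ch. %u DIMM %u\n", index,
	       index >> CHANNEL_SHIFT, index & DIMM_MASK);
	return 0;
}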
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index c9416e6..0f70dc2 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -1,6 +1,6 @@
/*
- * A hwmon driver for the IBM Active Energy Manager temperature/power sensors
- * and capping functionality.
+ * A hwmon driver for the IBM System Director Active Energy Manager (AEM)
+ * temperature/power/energy sensors and capping functionality.
* Copyright (C) 2008 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
@@ -463,12 +463,18 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
}
/* Update AEM energy registers */
+static void update_aem_energy_one(struct aem_data *data, int which)
+{
+ aem_read_sensor(data, AEM_ENERGY_ELEMENT, which,
+ &data->energy[which], 8);
+}
+
static void update_aem_energy(struct aem_data *data)
{
- aem_read_sensor(data, AEM_ENERGY_ELEMENT, 0, &data->energy[0], 8);
+ update_aem_energy_one(data, 0);
if (data->ver_major < 2)
return;
- aem_read_sensor(data, AEM_ENERGY_ELEMENT, 1, &data->energy[1], 8);
+ update_aem_energy_one(data, 1);
}
/* Update all AEM1 sensors */
@@ -676,7 +682,8 @@ static int aem_find_aem2(struct aem_ipmi_data *data,
return -ETIMEDOUT;
if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
- memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)))
+ memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) ||
+ fi_resp->num_instances <= instance_num)
return -ENOENT;
return 0;
@@ -849,7 +856,7 @@ static ssize_t aem_show_power(struct device *dev,
struct timespec b, a;
mutex_lock(&data->lock);
- update_aem_energy(data);
+ update_aem_energy_one(data, attr->index);
getnstimeofday(&b);
before = data->energy[attr->index];
@@ -861,7 +868,7 @@ static ssize_t aem_show_power(struct device *dev,
return 0;
}
- update_aem_energy(data);
+ update_aem_energy_one(data, attr->index);
getnstimeofday(&a);
after = data->energy[attr->index];
mutex_unlock(&data->lock);
@@ -880,7 +887,9 @@ static ssize_t aem_show_energy(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct aem_data *a = dev_get_drvdata(dev);
- a->update(a);
+ mutex_lock(&a->lock);
+ update_aem_energy_one(a, attr->index);
+ mutex_unlock(&a->lock);
return sprintf(buf, "%llu\n",
(unsigned long long)a->energy[attr->index] * 1000);
@@ -1104,7 +1113,7 @@ static void __exit aem_exit(void)
}
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
-MODULE_DESCRIPTION("IBM Active Energy Manager power/temp sensor driver");
+MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
MODULE_LICENSE("GPL");
module_init(aem_init);
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index daa7d12..de21142 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1055,9 +1055,10 @@ static int w83791d_probe(struct i2c_client *client,
{
struct w83791d_data *data;
struct device *dev = &client->dev;
- int i, val1, err;
+ int i, err;
#ifdef DEBUG
+ int val1;
val1 = w83791d_read(client, W83791D_REG_DID_VID4);
dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n",
(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c1adcdb..9efb021 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 18355ae..4655b79 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1593,7 +1593,7 @@ fail1:
if (machine_is_omap_h2()) {
/* full speed signaling by default */
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
- MC1_SPEED_REG);
+ MC1_SPEED);
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
MC2_SPD_SUSP_CTRL);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 550853f..b346a68 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -108,6 +108,9 @@ static int i2c_device_probe(struct device *dev)
if (!driver->probe || !driver->id_table)
return -ENODEV;
client->driver = driver;
+ if (!device_can_wakeup(&client->dev))
+ device_init_wakeup(&client->dev,
+ client->flags & I2C_CLIENT_WAKE);
dev_dbg(dev, "probe\n");
status = driver->probe(client, i2c_match_id(driver->id_table, client));
@@ -262,9 +265,8 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->adapter = adap;
client->dev.platform_data = info->platform_data;
- device_init_wakeup(&client->dev, info->flags & I2C_CLIENT_WAKE);
- client->flags = info->flags & ~I2C_CLIENT_WAKE;
+ client->flags = info->flags;
client->addr = info->addr;
client->irq = info->irq;
@@ -1188,8 +1190,8 @@ int i2c_probe(struct i2c_adapter *adapter,
&& address_data->normal_i2c[0] == I2C_CLIENT_END)
return 0;
- dev_warn(&adapter->dev, "SMBus Quick command not supported, "
- "can't probe for chips\n");
+ dev_dbg(&adapter->dev, "SMBus Quick command not supported, "
+ "can't probe for chips\n");
return -EOPNOTSUPP;
}
@@ -1350,6 +1352,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
}
}
+ /* Stop here if the classes do not match */
+ if (!(adapter->class & driver->class))
+ goto exit_free;
+
/* Stop here if we can't use SMBUS_QUICK */
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) {
if (address_data->probe[0] == I2C_CLIENT_END
@@ -1362,10 +1368,6 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
goto exit_free;
}
- /* Stop here if the classes do not match */
- if (!(adapter->class & driver->class))
- goto exit_free;
-
/* Probe entries are done second, and are not affected by ignore
entries either */
for (i = 0; address_data->probe[i] != I2C_CLIENT_END; i += 2) {
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 89a112d..49a8c58 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1272,9 +1272,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
*/
static void msf_from_bcd(struct atapi_msf *msf)
{
- msf->minute = BCD2BIN(msf->minute);
- msf->second = BCD2BIN(msf->second);
- msf->frame = BCD2BIN(msf->frame);
+ msf->minute = bcd2bin(msf->minute);
+ msf->second = bcd2bin(msf->second);
+ msf->frame = bcd2bin(msf->frame);
}
int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
@@ -1415,8 +1415,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
return stat;
if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
- toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
+ toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
}
ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
@@ -1456,8 +1456,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
return stat;
if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
- toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
+ toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT);
+ toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT);
} else {
toc->hdr.first_track = CDROM_LEADOUT;
toc->hdr.last_track = CDROM_LEADOUT;
@@ -1470,14 +1470,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
- toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
+ toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
}
for (i = 0; i <= ntracks; i++) {
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
- toc->ent[i].track = BCD2BIN(toc->ent[i].track);
+ toc->ent[i].track = bcd2bin(toc->ent[i].track);
msf_from_bcd(&toc->ent[i].addr.msf);
}
toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
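
The ide-cd.c hunks above are a mechanical rename from the BCD2BIN/BIN2BCD macros to the bcd2bin()/bin2bcd() helpers; the conversion itself is unchanged. For reference, a user-space re-implementation of the BCD round trip they perform:

/* User-space equivalents of the bcd2bin()/bin2bcd() conversions above. */
#include <stdio.h>

static unsigned char bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	/* a TOC minute field of 0x59 means 59 decimal */
	printf("bcd2bin(0x59) = %u\n", bcd2bin(0x59));	/* 59   */
	printf("bin2bcd(59)   = 0x%02X\n", bin2bcd(59));	/* 0x59 */
	return 0;
}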
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index 40644b6..3187215 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -307,7 +307,7 @@ static struct pci_driver driver = {
.name = "AEC62xx_IDE",
.id_table = aec62xx_pci_tbl,
.probe = aec62xx_init_one,
- .remove = aec62xx_remove,
+ .remove = __devexit_p(aec62xx_remove),
};
static int __init aec62xx_ide_init(void)
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index bfae2f8..e6d8ee8 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -447,7 +447,7 @@ static struct pci_driver driver = {
.name = "Cypress_IDE",
.id_table = cy82c693_pci_tbl,
.probe = cy82c693_init_one,
- .remove = cy82c693_remove,
+ .remove = __devexit_p(cy82c693_remove),
};
static int __init cy82c693_ide_init(void)
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 748793a..eb107ee 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1620,7 +1620,7 @@ static struct pci_driver driver = {
.name = "HPT366_IDE",
.id_table = hpt366_pci_tbl,
.probe = hpt366_init_one,
- .remove = hpt366_remove,
+ .remove = __devexit_p(hpt366_remove),
};
static int __init hpt366_ide_init(void)
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index b6dc723..4a1508a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -686,7 +686,7 @@ static struct pci_driver driver = {
.name = "ITE821x IDE",
.id_table = it821x_pci_tbl,
.probe = it821x_init_one,
- .remove = it821x_remove,
+ .remove = __devexit_p(it821x_remove),
};
static int __init it821x_ide_init(void)
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 0f609b7..d477da6 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -566,7 +566,7 @@ static struct pci_driver driver = {
.name = "Promise_IDE",
.id_table = pdc202new_pci_tbl,
.probe = pdc202new_init_one,
- .remove = pdc202new_remove,
+ .remove = __devexit_p(pdc202new_remove),
};
static int __init pdc202new_ide_init(void)
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 6cde48b..44cccd1 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -954,7 +954,7 @@ static struct pci_driver driver = {
.name = "SCC IDE",
.id_table = scc_pci_tbl,
.probe = scc_init_one,
- .remove = scc_remove,
+ .remove = __devexit_p(scc_remove),
};
static int scc_ide_init(void)
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 42eef19..681306c 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -621,9 +621,9 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
DRV_NAME)) {
printk(KERN_ERR
- "%s : %s -- ERROR, Addresses "
+ "%s %s: -- ERROR, Addresses "
"0x%p to 0x%p ALREADY in use\n",
- __func__, DRV_NAME, (void *) cmd_phys_base,
+ DRV_NAME, pci_name(dev), (void *)cmd_phys_base,
(void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
return -ENOMEM;
}
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 445ce6f..db2b88a 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -832,7 +832,7 @@ static struct pci_driver driver = {
.name = "SiI_IDE",
.id_table = siimage_pci_tbl,
.probe = siimage_init_one,
- .remove = siimage_remove,
+ .remove = __devexit_p(siimage_remove),
};
static int __init siimage_ide_init(void)
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index e5a4b42..5efe21d 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -610,7 +610,7 @@ static struct pci_driver driver = {
.name = "SIS_IDE",
.id_table = sis5513_pci_tbl,
.probe = sis5513_init_one,
- .remove = sis5513_remove,
+ .remove = __devexit_p(sis5513_remove),
};
static int __init sis5513_ide_init(void)
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 7fc88c3..927277c 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -249,7 +249,7 @@ static struct pci_driver driver = {
.name = "TC86C001",
.id_table = tc86c001_pci_tbl,
.probe = tc86c001_init_one,
- .remove = tc86c001_remove,
+ .remove = __devexit_p(tc86c001_remove),
};
static int __init tc86c001_ide_init(void)
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index a6b2cc8..94fb9ab 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -491,7 +491,7 @@ static struct pci_driver driver = {
.name = "VIA_IDE",
.id_table = via_pci_tbl,
.probe = via_init_one,
- .remove = via_remove,
+ .remove = __devexit_p(via_remove),
};
static int __init via_ide_init(void)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 994a21e..16240a7 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -844,7 +844,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
ne->host = host;
ne->nodeid = nodeid;
ne->generation = generation;
- ne->needs_probe = 1;
+ ne->needs_probe = true;
ne->guid = guid;
ne->guid_vendor_id = (guid >> 40) & 0xffffff;
@@ -1144,7 +1144,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
struct csr1212_keyval *kv, *vendor_name_kv = NULL;
u8 last_key_id = 0;
- ne->needs_probe = 0;
+ ne->needs_probe = false;
csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
switch (kv->key.id) {
@@ -1295,7 +1295,7 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
nodemgr_update_bus_options(ne);
/* Mark the node as new, so it gets re-probed */
- ne->needs_probe = 1;
+ ne->needs_probe = true;
} else {
/* old cache is valid, so update its generation */
struct nodemgr_csr_info *ci = ne->csr->private;
@@ -1566,57 +1566,60 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
struct probe_param {
struct host_info *hi;
int generation;
+ bool probe_now;
};
-static int __nodemgr_node_probe(struct device *dev, void *data)
+static int node_probe(struct device *dev, void *data)
{
- struct probe_param *param = (struct probe_param *)data;
+ struct probe_param *p = data;
struct node_entry *ne;
+ if (p->generation != get_hpsb_generation(p->hi->host))
+ return -EAGAIN;
+
ne = container_of(dev, struct node_entry, node_dev);
- if (!ne->needs_probe)
- nodemgr_probe_ne(param->hi, ne, param->generation);
- if (ne->needs_probe)
- nodemgr_probe_ne(param->hi, ne, param->generation);
+ if (ne->needs_probe == p->probe_now)
+ nodemgr_probe_ne(p->hi, ne, p->generation);
return 0;
}
static void nodemgr_node_probe(struct host_info *hi, int generation)
{
- struct hpsb_host *host = hi->host;
- struct probe_param param;
+ struct probe_param p;
- param.hi = hi;
- param.generation = generation;
- /* Do some processing of the nodes we've probed. This pulls them
+ p.hi = hi;
+ p.generation = generation;
+ /*
+ * Do some processing of the nodes we've probed. This pulls them
* into the sysfs layer if needed, and can result in processing of
* unit-directories, or just updating the node and its
* unit-directories.
*
* Run updates before probes. Usually, updates are time-critical
- * while probes are time-consuming. (Well, those probes need some
- * improvement...) */
-
- class_for_each_device(&nodemgr_ne_class, NULL, &param,
- __nodemgr_node_probe);
-
- /* If we had a bus reset while we were scanning the bus, it is
- * possible that we did not probe all nodes. In that case, we
- * skip the clean up for now, since we could remove nodes that
- * were still on the bus. Another bus scan is pending which will
- * do the clean up eventually.
+ * while probes are time-consuming.
*
+ * Meanwhile, another bus reset may have happened. In this case we
+ * skip everything here and let the next bus scan handle it.
+ * Otherwise we may prematurely remove nodes which are still there.
+ */
+ p.probe_now = false;
+ if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
+ return;
+
+ p.probe_now = true;
+ if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
+ return;
+ /*
* Now let's tell the bus to rescan our devices. This may seem
* like overhead, but the driver-model core will only scan a
* device for a driver when either the device is added, or when a
* new driver is added. A bus reset is a good reason to rescan
* devices that were there before. For example, an sbp2 device
* may become available for login, if the host that held it was
- * just removed. */
-
- if (generation == get_hpsb_generation(host))
- if (bus_rescan_devices(&ieee1394_bus_type))
- HPSB_DEBUG("bus_rescan_devices had an error");
+ * just removed.
+ */
+ if (bus_rescan_devices(&ieee1394_bus_type) != 0)
+ HPSB_DEBUG("bus_rescan_devices had an error");
}
static int nodemgr_send_resume_packet(struct hpsb_host *host)
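
nodemgr_node_probe() above now walks the node class twice, updates first (probe_now false) and the slower probes second, and node_probe() bails out as soon as the bus generation has moved on, so a fresh bus reset aborts the scan and leaves the work to the next one. A loose user-space model of that two-pass, abort-on-generation-change pattern (stand-in data, not the ieee1394 structures):

/* Two-pass scan with generation check, modelled on nodemgr_node_probe(). */
#include <stdio.h>
#include <stdbool.h>

struct node { const char *name; bool needs_probe; };

static int bus_generation = 1;		/* would be bumped by a bus reset */

static int visit(struct node *n, int generation, bool probe_now)
{
	if (generation != bus_generation)
		return -1;		/* -EAGAIN in the driver */
	if (n->needs_probe == probe_now)
		printf("%s: %s\n", n->name, probe_now ? "probe" : "update");
	return 0;
}

int main(void)
{
	struct node nodes[] = {
		{ "node-0", false },	/* known node: update only */
		{ "node-1", true },	/* new node: needs a full probe */
	};
	int generation = bus_generation;
	bool pass[2] = { false, true };	/* updates first, probes second */

	for (int p = 0; p < 2; p++)
		for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
			if (visit(&nodes[i], generation, pass[p]))
				return 0;	/* bus reset: let the next scan redo it */
	return 0;
}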
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 919e92e..6eb2646 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -97,7 +97,7 @@ struct node_entry {
struct hpsb_host *host; /* Host this node is attached to */
nodeid_t nodeid; /* NodeID */
struct bus_options busopt; /* Bus Options */
- int needs_probe;
+ bool needs_probe;
unsigned int generation; /* Synced with hpsb generation */
/* The following is read from the config rom */
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 9cbf315..1d6ad34 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -731,15 +731,26 @@ static int sbp2_update(struct unit_directory *ud)
{
struct sbp2_lu *lu = ud->device.driver_data;
- if (sbp2_reconnect_device(lu)) {
- /* Reconnect has failed. Perhaps we didn't reconnect fast
- * enough. Try a regular login, but first log out just in
- * case of any weirdness. */
+ if (sbp2_reconnect_device(lu) != 0) {
+ /*
+ * Reconnect failed. If another bus reset happened,
+ * let nodemgr proceed and call sbp2_update again later
+ * (or sbp2_remove if this node went away).
+ */
+ if (!hpsb_node_entry_valid(lu->ne))
+ return 0;
+ /*
+ * Or the target rejected the reconnect because we weren't
+ * fast enough. Try a regular login, but first log out
+ * just in case of any weirdness.
+ */
sbp2_logout_device(lu);
- if (sbp2_login_device(lu)) {
- /* Login failed too, just fail, and the backend
- * will call our sbp2_remove for us */
+ if (sbp2_login_device(lu) != 0) {
+ if (!hpsb_node_entry_valid(lu->ne))
+ return 0;
+
+ /* Maybe another initiator won the login. */
SBP2_ERR("Failed to reconnect to sbp2 device!");
return -EBUSY;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0b0618e..1ab919f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm {
#define EHCA_MOD_QP_PARM_MAX 4
+#define QMAP_IDX_MASK 0xFFFFULL
+
+/* struct for tracking if cqes have been reported to the application */
+struct ehca_qmap_entry {
+ u16 app_wr_id;
+ u16 reported;
+};
+
struct ehca_qp {
union {
struct ib_qp ib_qp;
@@ -165,6 +173,7 @@ struct ehca_qp {
enum ehca_ext_qp_type ext_type;
enum ib_qp_state state;
struct ipz_queue ipz_squeue;
+ struct ehca_qmap_entry *sq_map;
struct ipz_queue ipz_rqueue;
struct h_galpas galpas;
u32 qkey;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 8188030..5d28e3e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -213,6 +213,7 @@ struct ehca_wqe {
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10
+#define WC_SEND_RECEIVE_BIT 0x80
struct ehca_cqe {
u64 work_request_id;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efd..b6bcee0 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp(
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct ib_ucontext *context = NULL;
+ u32 nr_qes;
u64 h_ret;
int is_llqp = 0, has_srq = 0;
int qp_type, max_send_sge, max_recv_sge, ret;
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp(
"and pages ret=%i", ret);
goto create_qp_exit2;
}
+ nr_qes = my_qp->ipz_squeue.queue_length /
+ my_qp->ipz_squeue.qe_size;
+ my_qp->sq_map = vmalloc(nr_qes *
+ sizeof(struct ehca_qmap_entry));
+ if (!my_qp->sq_map) {
+ ehca_err(pd->device, "Couldn't allocate squeue "
+ "map ret=%i", ret);
+ goto create_qp_exit3;
+ }
}
if (HAS_RQ(my_qp)) {
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp(
if (ret) {
ehca_err(pd->device, "Couldn't initialize rqueue "
"and pages ret=%i", ret);
- goto create_qp_exit3;
+ goto create_qp_exit4;
}
}
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp(
if (!my_qp->mod_qp_parm) {
ehca_err(pd->device,
"Could not alloc mod_qp_parm");
- goto create_qp_exit4;
+ goto create_qp_exit5;
}
}
}
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp(
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- goto create_qp_exit5;
+ goto create_qp_exit6;
}
}
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp(
if (ret) {
ehca_err(pd->device,
"Couldn't assign qp to send_cq ret=%i", ret);
- goto create_qp_exit5;
+ goto create_qp_exit6;
}
}
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp(
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
- goto create_qp_exit6;
+ goto create_qp_exit7;
}
}
return my_qp;
-create_qp_exit6:
+create_qp_exit7:
ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-create_qp_exit5:
+create_qp_exit6:
kfree(my_qp->mod_qp_parm);
-create_qp_exit4:
+create_qp_exit5:
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+create_qp_exit4:
+ if (HAS_SQ(my_qp))
+ vfree(my_qp->sq_map);
+
create_qp_exit3:
if (HAS_SQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_QKEY)
my_qp->qkey = attr->qkey;
- my_qp->state = qp_new_state;
-
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1563,8 @@ modify_qp_exit1:
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+ int ret = 0;
+
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
ib_device);
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
attr->qp_state, my_qp->init_attr.port_num,
ibqp->qp_type);
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
- return 0;
+ goto out;
}
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
}
- return internal_modify_qp(ibqp, attr, attr_mask, 0);
+ ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+
+out:
+ if ((ret == 0) && (attr_mask & IB_QP_STATE))
+ my_qp->state = attr->qp_state;
+
+ return ret;
}
void ehca_recover_sqp(struct ib_qp *sqp)
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
- if (HAS_SQ(my_qp))
+ if (HAS_SQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
+ vfree(my_qp->sq_map);
+ }
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 898c8b5..4426d82 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
static inline int ehca_write_swqe(struct ehca_qp *qp,
struct ehca_wqe *wqe_p,
const struct ib_send_wr *send_wr,
+ u32 sq_map_idx,
int hidden)
{
u32 idx;
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
- wqe_p->work_request_id = send_wr->wr_id;
+ wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
+ wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+
+ qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
+ qp->sq_map[sq_map_idx].reported = 0;
switch (send_wr->opcode) {
case IB_WR_SEND:
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp,
{
struct ehca_wqe *wqe_p;
int ret;
+ u32 sq_map_idx;
u64 start_offset = my_qp->ipz_squeue.current_q_offset;
/* get pointer next to free WQE */
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp,
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
}
+
+ /*
+ * Get the index of the WQE in the send queue. The same index is used
+ * for writing into the sq_map.
+ */
+ sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
+
/* write a SEND WQE into the QUEUE */
- ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+ ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
/*
* if something failed,
* reset the free entry pointer to the start value
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
struct ehca_qp *my_qp;
int cqe_count = 0, is_error;
-poll_cq_one_read_cqe:
+repoll:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe:
ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
my_cq->cq_number, cqe->local_qp_number);
/* ignore this purged cqe */
- goto poll_cq_one_read_cqe;
+ goto repoll;
}
spin_lock_irqsave(&qp->spinlock_s, flags);
purgeflag = qp->sqerr_purgeflag;
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe:
* that caused sqe and turn off purge flag
*/
qp->sqerr_purgeflag = 0;
- goto poll_cq_one_read_cqe;
+ goto repoll;
}
}
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe:
my_cq, my_cq->cq_number);
}
- /* we got a completion! */
- wc->wr_id = cqe->work_request_id;
+ read_lock(&ehca_qp_idr_lock);
+ my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+ read_unlock(&ehca_qp_idr_lock);
+ if (!my_qp)
+ goto repoll;
+ wc->qp = &my_qp->ib_qp;
+
+ if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
+ struct ehca_qmap_entry *qmap_entry;
+ /*
+ * We got a send completion and need to restore the original
+ * wr_id.
+ */
+ qmap_entry = &my_qp->sq_map[cqe->work_request_id &
+ QMAP_IDX_MASK];
+
+ if (qmap_entry->reported) {
+ ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+ my_qp->real_qp_num);
+ /* found a double cqe, discard it and read next one */
+ goto repoll;
+ }
+ wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
+ wc->wr_id |= qmap_entry->app_wr_id;
+ qmap_entry->reported = 1;
+ } else
+ /* We got a receive completion. */
+ wc->wr_id = cqe->work_request_id;
/* eval ib_wc_opcode */
wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe:
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
/* update also queue adder to throw away this entry!!! */
- goto poll_cq_one_exit0;
+ goto repoll;
}
/* eval ib_wc_status */
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe:
} else
wc->status = IB_WC_SUCCESS;
- read_lock(&ehca_qp_idr_lock);
- my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
- wc->qp = &my_qp->ib_qp;
- read_unlock(&ehca_qp_idr_lock);
-
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
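
The ehca_reqs.c changes above carry the send-queue slot in the low 16 bits of the hardware work_request_id (QMAP_IDX_MASK) and park the application's low 16 bits in sq_map, so ehca_poll_cq_one() can splice the original wr_id back together. A minimal stand-alone model of that pack/restore round trip, with made-up values:

/* Model of the ehca wr_id pack/restore round trip shown above. */
#include <stdio.h>
#include <stdint.h>

#define QMAP_IDX_MASK 0xFFFFULL

int main(void)
{
	uint64_t app_wr_id = 0x1234567890ABCDEFULL;	/* from ib_post_send() */
	uint64_t sq_map_idx = 0x002A;			/* slot in the send queue */
	uint16_t saved_low = app_wr_id & QMAP_IDX_MASK;	/* sq_map[].app_wr_id */

	/* what ehca_write_swqe() puts into the WQE */
	uint64_t hw_wr_id = (app_wr_id & ~QMAP_IDX_MASK) |
			    (sq_map_idx & QMAP_IDX_MASK);

	/* what ehca_poll_cq_one() hands back to the application */
	uint64_t restored = (hw_wr_id & ~QMAP_IDX_MASK) | saved_low;

	printf("hw wr_id = 0x%016llX\n", (unsigned long long)hw_wr_id);
	printf("restored = 0x%016llX (matches: %s)\n",
	       (unsigned long long)restored,
	       restored == app_wr_id ? "yes" : "no");
	return 0;
}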
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index ec950bf..21f7d06 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -54,7 +54,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/device.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 23faba9..8bb5170 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index d90f5e9..9839e20 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1720,7 +1720,7 @@ static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
"not 2KB aligned!\n", pa);
return;
}
- if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+ if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
ipath_dev_err(dd,
"BUG: Physical page address 0x%lx "
"larger than supported\n", pa);
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 36aa242..729446f 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -267,6 +267,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
u16 lrh0;
u16 lid;
int ret = 0;
+ int next_cur;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -290,8 +291,9 @@ int ipath_make_ud_req(struct ipath_qp *qp)
goto bail;
wqe = get_swqe_ptr(qp, qp->s_cur);
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
/* Construct the header. */
ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
@@ -315,6 +317,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
qp->s_flags |= IPATH_S_WAIT_DMA;
goto bail;
}
+ qp->s_cur = next_cur;
spin_unlock_irqrestore(&qp->s_lock, flags);
ipath_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -323,6 +326,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
}
}
+ qp->s_cur = next_cur;
extra_bytes = -wqe->length & 3;
nwords = (wqe->length + extra_bytes) >> 2;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a4cdb46..87f5c5a 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -204,6 +204,8 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
if (err)
goto err_mr;
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+
return &mr->ibmr;
err_mr:
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 39bd897..8eb7ae9 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -43,7 +43,6 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <asm/io.h>
#include <linux/crc32c.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7ebc400..341ffed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
dev_kfree_skb_any(rx_ring[i].skb);
}
- kfree(rx_ring);
+ vfree(rx_ring);
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int ret;
int i;
- rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
- if (!rx->rx_ring)
+ rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+ if (!rx->rx_ring) {
+ printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
+ priv->ca->name, ipoib_recvq_size);
return -ENOMEM;
+ }
+
+ memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
return;
}
- priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
- GFP_KERNEL);
+ priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
+ return;
}
+
+ memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
int ipoib_cm_dev_init(struct net_device *dev)
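Note: these hunks trade kcalloc()/kzalloc() for vmalloc() plus memset(), presumably because a large ipoib_recvq_size makes the CM rings too big for a comfortable kmalloc-backed allocation. A minimal sketch of the allocate/zero/free pairing this implies (the helper name is hypothetical):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Hypothetical helper: zeroed, virtually contiguous ring allocation. */
static void *ring_zalloc(size_t nentries, size_t entry_size)
{
        void *ring = vmalloc(nentries * entry_size);

        if (ring)
                memset(ring, 0, nentries * entry_size);
        return ring;
}

/* Memory from vmalloc() must go back via vfree(), which is why
 * ipoib_cm_free_rx_ring() above switches kfree() -> vfree(). */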
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f51201b..7e9e218 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -156,14 +156,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- /*
- * Now flush workqueue to make sure a scheduled task doesn't
- * bring our internal state back up.
- */
- flush_workqueue(ipoib_workqueue);
-
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_stop(dev, 0);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -1314,7 +1308,7 @@ sysfs_failed:
register_failed:
ib_unregister_event_handler(&priv->event_handler);
- flush_scheduled_work();
+ flush_workqueue(ipoib_workqueue);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1373,7 +1367,12 @@ static void ipoib_remove_one(struct ib_device *device)
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
- flush_scheduled_work();
+
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
+ rtnl_unlock();
+
+ flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev);
ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 8950e95..ac33c8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -392,8 +392,16 @@ static int ipoib_mcast_join_complete(int status,
&priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
- if (mcast == priv->broadcast)
+ if (mcast == priv->broadcast) {
+ /*
+ * Take RTNL lock here to avoid racing with
+ * ipoib_stop() and turning the carrier back
+ * on while a device is being removed.
+ */
+ rtnl_lock();
netif_carrier_on(dev);
+ rtnl_unlock();
+ }
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 63462ec..26ff621 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include "iscsi_iser.h"
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2d65411..3524bef 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -647,6 +647,47 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
return copy_to_user(p, str, len) ? -EFAULT : len;
}
+#define OLD_KEY_MAX 0x1ff
+static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user *p, int compat_mode)
+{
+ static unsigned long keymax_warn_time;
+ unsigned long *bits;
+ int len;
+
+ switch (_IOC_NR(cmd) & EV_MAX) {
+
+ case 0: bits = dev->evbit; len = EV_MAX; break;
+ case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
+ case EV_REL: bits = dev->relbit; len = REL_MAX; break;
+ case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
+ case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
+ case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
+ case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
+ case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
+ case EV_SW: bits = dev->swbit; len = SW_MAX; break;
+ default: return -EINVAL;
+ }
+
+ /*
+ * Work around bugs in userspace programs that like to do
+ * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
+ * should be in bytes, not in bits.
+ */
+ if ((_IOC_NR(cmd) & EV_MAX) == EV_KEY && _IOC_SIZE(cmd) == OLD_KEY_MAX) {
+ len = OLD_KEY_MAX;
+ if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
+ printk(KERN_WARNING
+ "evdev.c(EVIOCGBIT): Suspicious buffer size %u, "
+ "limiting output to %zu bytes. See "
+ "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
+ OLD_KEY_MAX,
+ BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
+ }
+
+ return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
+}
+#undef OLD_KEY_MAX
+
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p, int compat_mode)
{
@@ -733,26 +774,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
if (_IOC_DIR(cmd) == _IOC_READ) {
- if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) {
-
- unsigned long *bits;
- int len;
-
- switch (_IOC_NR(cmd) & EV_MAX) {
-
- case 0: bits = dev->evbit; len = EV_MAX; break;
- case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
- case EV_REL: bits = dev->relbit; len = REL_MAX; break;
- case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
- case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
- case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
- case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
- case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
- case EV_SW: bits = dev->swbit; len = SW_MAX; break;
- default: return -EINVAL;
- }
- return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
- }
+ if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
+ return handle_eviocgbit(dev, cmd, p, compat_mode);
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
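Note: the workaround above caps the copy when userspace passes 0x1ff (the old KEY_MAX) as the EVIOCGBIT size, i.e. a bit count instead of a byte count. A small userspace sketch of the intended call, with the size given in bytes (the device path is only an example):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        unsigned char keybits[KEY_MAX / 8 + 1];
        int fd = open("/dev/input/event0", O_RDONLY);

        if (fd < 0)
                return 1;
        memset(keybits, 0, sizeof(keybits));
        /* size argument is sizeof(keybits) -- bytes, not KEY_MAX bits */
        if (ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(keybits)), keybits) < 0)
                perror("EVIOCGBIT");
        printf("KEY_A is %s\n",
               (keybits[KEY_A / 8] & (1 << (KEY_A % 8))) ? "present" : "absent");
        close(fd);
        return 0;
}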
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 87d3e7e..6791be8 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -127,6 +127,7 @@ static const struct xpad_device {
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 54ed8e2..e348cfc 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -29,7 +29,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -44,7 +43,7 @@
#include <linux/input.h>
#include <asm/portmux.h>
-#include <asm/mach/bf54x_keys.h>
+#include <mach/bf54x_keys.h>
#define DRV_NAME "bf54x-keys"
#define TIME_SCALE 100 /* 100 ns */
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index be58730..ec96b36 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -118,6 +117,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
unsigned int type = button->type ?: EV_KEY;
bdata->input = input;
+ bdata->button = button;
setup_timer(&bdata->timer,
gpio_check_button, (unsigned long)bdata);
@@ -256,7 +256,7 @@ static int gpio_keys_resume(struct platform_device *pdev)
#define gpio_keys_resume NULL
#endif
-struct platform_driver gpio_keys_device_driver = {
+static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
.remove = __devexit_p(gpio_keys_remove),
.suspend = gpio_keys_suspend,
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 6a1f48b..2adf9cb 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -148,6 +148,9 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
return 0;
}
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_DESCRIPTION("Cobalt button interface driver");
+MODULE_LICENSE("GPL");
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:Cobalt buttons");
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 7bbea09..f996546 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -130,6 +130,29 @@ config MOUSE_APPLETOUCH
To compile this driver as a module, choose M here: the
module will be called appletouch.
+config MOUSE_BCM5974
+ tristate "Apple USB BCM5974 Multitouch trackpad support"
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ Say Y here if you have an Apple USB BCM5974 Multitouch
+ trackpad.
+
+ The BCM5974 is the multitouch trackpad found in the Macbook
+ Air (JAN2008) and Macbook Pro Penryn (FEB2008) laptops.
+
+ It is also found in the iPhone (2007) and iPod Touch (2008).
+
+ This driver provides multitouch functionality together with
+ the synaptics X11 driver.
+
+ The interface is currently identical to the appletouch interface;
+ for further information, see
+ <file:Documentation/input/appletouch.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm5974.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 9e6e363..d4d2025 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
+obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
new file mode 100644
index 0000000..2ec921b
--- /dev/null
+++ b/drivers/input/mouse/bcm5974.c
@@ -0,0 +1,681 @@
+/*
+ * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
+ *
+ * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
+ *
+ * The USB initialization and package decoding was made by
+ * Scott Shawcroft as part of the touchd user-space driver project:
+ * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com)
+ *
+ * The BCM5974 driver is based on the appletouch driver:
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net)
+ * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
+ * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de)
+ * Copyright (C) 2005 Peter Osterlund (petero2@telia.com)
+ * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch)
+ * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb/input.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+
+#define USB_VENDOR_ID_APPLE 0x05ac
+
+/* MacbookAir, aka wellspring */
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
+#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
+/* MacbookProPenryn, aka wellspring2 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
+
+#define BCM5974_DEVICE(prod) { \
+ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL), \
+ .idVendor = USB_VENDOR_ID_APPLE, \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \
+ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \
+}
+
+/* table of devices that work with this driver */
+static const struct usb_device_id bcm5974_table [] = {
+ /* MacbookAir1.1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
+ /* MacbookProPenryn */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
+ /* Terminating entry */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, bcm5974_table);
+
+MODULE_AUTHOR("Henrik Rydberg");
+MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, format, a...)\
+ { if (debug >= level) printk(KERN_DEBUG format, ##a); }
+
+static int debug = 1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Activate debugging output");
+
+/* button data structure */
+struct bt_data {
+ u8 unknown1; /* constant */
+ u8 button; /* left button */
+ u8 rel_x; /* relative x coordinate */
+ u8 rel_y; /* relative y coordinate */
+};
+
+/* trackpad header structure */
+struct tp_header {
+ u8 unknown1[16]; /* constants, timers, etc */
+ u8 fingers; /* number of fingers on trackpad */
+ u8 unknown2[9]; /* constants, timers, etc */
+};
+
+/* trackpad finger structure */
+struct tp_finger {
+ __le16 origin; /* left/right origin? */
+ __le16 abs_x; /* absolute x coordinate */
+ __le16 abs_y; /* absolute y coordinate */
+ __le16 rel_x; /* relative x coordinate */
+ __le16 rel_y; /* relative y coordinate */
+ __le16 size_major; /* finger size, major axis? */
+ __le16 size_minor; /* finger size, minor axis? */
+ __le16 orientation; /* 16384 when point, else 15 bit angle */
+ __le16 force_major; /* trackpad force, major axis? */
+ __le16 force_minor; /* trackpad force, minor axis? */
+ __le16 unused[3]; /* zeros */
+ __le16 multi; /* one finger: varies, more fingers: constant */
+};
+
+/* trackpad data structure, empirically at least ten fingers */
+struct tp_data {
+ struct tp_header header;
+ struct tp_finger finger[16];
+};
+
+/* device-specific parameters */
+struct bcm5974_param {
+ int dim; /* logical dimension */
+ int fuzz; /* logical noise value */
+ int devmin; /* device minimum reading */
+ int devmax; /* device maximum reading */
+};
+
+/* device-specific configuration */
+struct bcm5974_config {
+ int ansi, iso, jis; /* the product id of this device */
+ int bt_ep; /* the endpoint of the button interface */
+ int bt_datalen; /* data length of the button interface */
+ int tp_ep; /* the endpoint of the trackpad interface */
+ int tp_datalen; /* data length of the trackpad interface */
+ struct bcm5974_param p; /* finger pressure limits */
+ struct bcm5974_param w; /* finger width limits */
+ struct bcm5974_param x; /* horizontal limits */
+ struct bcm5974_param y; /* vertical limits */
+};
+
+/* logical device structure */
+struct bcm5974 {
+ char phys[64];
+ struct usb_device *udev; /* usb device */
+ struct usb_interface *intf; /* our interface */
+ struct input_dev *input; /* input dev */
+ struct bcm5974_config cfg; /* device configuration */
+ struct mutex pm_mutex; /* serialize access to open/suspend */
+ int opened; /* 1: opened, 0: closed */
+ struct urb *bt_urb; /* button usb request block */
+ struct bt_data *bt_data; /* button transferred data */
+ struct urb *tp_urb; /* trackpad usb request block */
+ struct tp_data *tp_data; /* trackpad transferred data */
+};
+
+/* logical dimensions */
+#define DIM_PRESSURE 256 /* maximum finger pressure */
+#define DIM_WIDTH 16 /* maximum finger width */
+#define DIM_X 1280 /* maximum trackpad x value */
+#define DIM_Y 800 /* maximum trackpad y value */
+
+/* logical signal quality */
+#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
+#define SN_WIDTH 100 /* width signal-to-noise ratio */
+#define SN_COORD 250 /* coordinate signal-to-noise ratio */
+
+/* device constants */
+static const struct bcm5974_config bcm5974_config_table[] = {
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
+ 0x84, sizeof(struct bt_data),
+ 0x81, sizeof(struct tp_data),
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4824, 5342 },
+ { DIM_Y, DIM_Y / SN_COORD, -172, 5820 }
+ },
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING2_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
+ 0x84, sizeof(struct bt_data),
+ 0x81, sizeof(struct tp_data),
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4824, 4824 },
+ { DIM_Y, DIM_Y / SN_COORD, -172, 4290 }
+ },
+ {}
+};
+
+/* return the device-specific configuration by device */
+static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev)
+{
+ u16 id = le16_to_cpu(udev->descriptor.idProduct);
+ const struct bcm5974_config *cfg;
+
+ for (cfg = bcm5974_config_table; cfg->ansi; ++cfg)
+ if (cfg->ansi == id || cfg->iso == id || cfg->jis == id)
+ return cfg;
+
+ return bcm5974_config_table;
+}
+
+/* convert 16-bit little endian to signed integer */
+static inline int raw2int(__le16 x)
+{
+ return (signed short)le16_to_cpu(x);
+}
+
+/* scale device data to logical dimensions (asserts devmin < devmax) */
+static inline int int2scale(const struct bcm5974_param *p, int x)
+{
+ return x * p->dim / (p->devmax - p->devmin);
+}
+
+/* all logical value ranges are [0,dim). */
+static inline int int2bound(const struct bcm5974_param *p, int x)
+{
+ int s = int2scale(p, x);
+
+ return clamp_val(s, 0, p->dim - 1);
+}
+
+/* setup which logical events to report */
+static void setup_events_to_report(struct input_dev *input_dev,
+ const struct bcm5974_config *cfg)
+{
+ __set_bit(EV_ABS, input_dev->evbit);
+
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, cfg->p.dim, cfg->p.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_TOOL_WIDTH,
+ 0, cfg->w.dim, cfg->w.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X,
+ 0, cfg->x.dim, cfg->x.fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, cfg->y.dim, cfg->y.fuzz, 0);
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+}
+
+/* report button data as logical button state */
+static int report_bt_state(struct bcm5974 *dev, int size)
+{
+ if (size != sizeof(struct bt_data))
+ return -EIO;
+
+ input_report_key(dev->input, BTN_LEFT, dev->bt_data->button);
+ input_sync(dev->input);
+
+ return 0;
+}
+
+/* report trackpad data as logical trackpad state */
+static int report_tp_state(struct bcm5974 *dev, int size)
+{
+ const struct bcm5974_config *c = &dev->cfg;
+ const struct tp_finger *f = dev->tp_data->finger;
+ struct input_dev *input = dev->input;
+ const int fingers = (size - 26) / 28;
+ int p = 0, w, x, y, n = 0;
+
+ if (size < 26 || (size - 26) % 28 != 0)
+ return -EIO;
+
+ if (fingers) {
+ p = raw2int(f->force_major);
+ w = raw2int(f->size_major);
+ x = raw2int(f->abs_x);
+ y = raw2int(f->abs_y);
+ n = p > 0 ? fingers : 0;
+
+ dprintk(9,
+ "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n",
+ p, w, x, y, n);
+
+ input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w));
+ input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin));
+ input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y));
+ }
+
+ input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p));
+
+ input_report_key(input, BTN_TOOL_FINGER, n == 1);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2);
+ input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2);
+
+ input_sync(input);
+
+ return 0;
+}
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
+#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
+#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
+#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
+
+static int bcm5974_wellspring_mode(struct bcm5974 *dev)
+{
+ char *data = kmalloc(8, GFP_KERNEL);
+ int retval = 0, size;
+
+ if (!data) {
+ err("bcm5974: out of memory");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ /* read configuration */
+ size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
+ BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+
+ if (size != 8) {
+ err("bcm5974: could not read from device");
+ retval = -EIO;
+ goto out;
+ }
+
+ /* apply the mode switch */
+ data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE;
+
+ /* write configuration */
+ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
+ BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+
+ if (size != 8) {
+ err("bcm5974: could not write to device");
+ retval = -EIO;
+ goto out;
+ }
+
+ dprintk(2, "bcm5974: switched to wellspring mode.\n");
+
+ out:
+ kfree(data);
+ return retval;
+}
+
+static void bcm5974_irq_button(struct urb *urb)
+{
+ struct bcm5974 *dev = urb->context;
+ int error;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -EOVERFLOW:
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dbg("bcm5974: button urb shutting down: %d", urb->status);
+ return;
+ default:
+ dbg("bcm5974: button urb status: %d", urb->status);
+ goto exit;
+ }
+
+ if (report_bt_state(dev, dev->bt_urb->actual_length))
+ dprintk(1, "bcm5974: bad button package, length: %d\n",
+ dev->bt_urb->actual_length);
+
+exit:
+ error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC);
+ if (error)
+ err("bcm5974: button urb failed: %d", error);
+}
+
+static void bcm5974_irq_trackpad(struct urb *urb)
+{
+ struct bcm5974 *dev = urb->context;
+ int error;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -EOVERFLOW:
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dbg("bcm5974: trackpad urb shutting down: %d", urb->status);
+ return;
+ default:
+ dbg("bcm5974: trackpad urb status: %d", urb->status);
+ goto exit;
+ }
+
+ /* control response ignored */
+ if (dev->tp_urb->actual_length == 2)
+ goto exit;
+
+ if (report_tp_state(dev, dev->tp_urb->actual_length))
+ dprintk(1, "bcm5974: bad trackpad package, length: %d\n",
+ dev->tp_urb->actual_length);
+
+exit:
+ error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC);
+ if (error)
+ err("bcm5974: trackpad urb failed: %d", error);
+}
+
+/*
+ * The Wellspring trackpad, like many recent Apple trackpads, shares
+ * the USB device with the keyboard. Since keyboards are usually
+ * handled by the HID system, the device ends up being handled by two
+ * modules. Setting up the device therefore becomes slightly
+ * complicated. To enable multitouch features, a mode switch is
+ * required, which is usually applied via the control interface of the
+ * device. It can be argued where this switch should take place. In
+ * some drivers, like appletouch, the switch is made during
+ * probe. However, the hid module may also alter the state of the
+ * device, resulting in trackpad malfunction under certain
+ * circumstances. To get around this problem, there is at least one
+ * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to
+ * receive a reset_resume request rather than the normal resume.
+ * Since the implementation of reset_resume is equal to mode switch
+ * plus start_traffic, it seems easier to always do the switch when
+ * starting traffic on the device.
+ */
+static int bcm5974_start_traffic(struct bcm5974 *dev)
+{
+ if (bcm5974_wellspring_mode(dev)) {
+ dprintk(1, "bcm5974: mode switch failed\n");
+ goto error;
+ }
+
+ if (usb_submit_urb(dev->bt_urb, GFP_KERNEL))
+ goto error;
+
+ if (usb_submit_urb(dev->tp_urb, GFP_KERNEL))
+ goto err_kill_bt;
+
+ return 0;
+
+err_kill_bt:
+ usb_kill_urb(dev->bt_urb);
+error:
+ return -EIO;
+}
+
+static void bcm5974_pause_traffic(struct bcm5974 *dev)
+{
+ usb_kill_urb(dev->tp_urb);
+ usb_kill_urb(dev->bt_urb);
+}
+
+/*
+ * The code below implements open/close and manual suspend/resume.
+ * All functions may be called in random order.
+ *
+ * Opening a suspended device fails with EACCES - permission denied.
+ *
+ * Failing a resume leaves the device resumed but closed.
+ */
+static int bcm5974_open(struct input_dev *input)
+{
+ struct bcm5974 *dev = input_get_drvdata(input);
+ int error;
+
+ error = usb_autopm_get_interface(dev->intf);
+ if (error)
+ return error;
+
+ mutex_lock(&dev->pm_mutex);
+
+ error = bcm5974_start_traffic(dev);
+ if (!error)
+ dev->opened = 1;
+
+ mutex_unlock(&dev->pm_mutex);
+
+ if (error)
+ usb_autopm_put_interface(dev->intf);
+
+ return error;
+}
+
+static void bcm5974_close(struct input_dev *input)
+{
+ struct bcm5974 *dev = input_get_drvdata(input);
+
+ mutex_lock(&dev->pm_mutex);
+
+ bcm5974_pause_traffic(dev);
+ dev->opened = 0;
+
+ mutex_unlock(&dev->pm_mutex);
+
+ usb_autopm_put_interface(dev->intf);
+}
+
+static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+
+ mutex_lock(&dev->pm_mutex);
+
+ if (dev->opened)
+ bcm5974_pause_traffic(dev);
+
+ mutex_unlock(&dev->pm_mutex);
+
+ return 0;
+}
+
+static int bcm5974_resume(struct usb_interface *iface)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+ int error = 0;
+
+ mutex_lock(&dev->pm_mutex);
+
+ if (dev->opened)
+ error = bcm5974_start_traffic(dev);
+
+ mutex_unlock(&dev->pm_mutex);
+
+ return error;
+}
+
+static int bcm5974_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(iface);
+ const struct bcm5974_config *cfg;
+ struct bcm5974 *dev;
+ struct input_dev *input_dev;
+ int error = -ENOMEM;
+
+ /* find the product index */
+ cfg = bcm5974_get_config(udev);
+
+ /* allocate memory for our device state and initialize it */
+ dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!dev || !input_dev) {
+ err("bcm5974: out of memory");
+ goto err_free_devs;
+ }
+
+ dev->udev = udev;
+ dev->intf = iface;
+ dev->input = input_dev;
+ dev->cfg = *cfg;
+ mutex_init(&dev->pm_mutex);
+
+ /* setup urbs */
+ dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->bt_urb)
+ goto err_free_devs;
+
+ dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tp_urb)
+ goto err_free_bt_urb;
+
+ dev->bt_data = usb_buffer_alloc(dev->udev,
+ dev->cfg.bt_datalen, GFP_KERNEL,
+ &dev->bt_urb->transfer_dma);
+ if (!dev->bt_data)
+ goto err_free_urb;
+
+ dev->tp_data = usb_buffer_alloc(dev->udev,
+ dev->cfg.tp_datalen, GFP_KERNEL,
+ &dev->tp_urb->transfer_dma);
+ if (!dev->tp_data)
+ goto err_free_bt_buffer;
+
+ usb_fill_int_urb(dev->bt_urb, udev,
+ usb_rcvintpipe(udev, cfg->bt_ep),
+ dev->bt_data, dev->cfg.bt_datalen,
+ bcm5974_irq_button, dev, 1);
+
+ usb_fill_int_urb(dev->tp_urb, udev,
+ usb_rcvintpipe(udev, cfg->tp_ep),
+ dev->tp_data, dev->cfg.tp_datalen,
+ bcm5974_irq_trackpad, dev, 1);
+
+ /* create bcm5974 device */
+ usb_make_path(udev, dev->phys, sizeof(dev->phys));
+ strlcat(dev->phys, "/input0", sizeof(dev->phys));
+
+ input_dev->name = "bcm5974";
+ input_dev->phys = dev->phys;
+ usb_to_input_id(dev->udev, &input_dev->id);
+ input_dev->dev.parent = &iface->dev;
+
+ input_set_drvdata(input_dev, dev);
+
+ input_dev->open = bcm5974_open;
+ input_dev->close = bcm5974_close;
+
+ setup_events_to_report(input_dev, cfg);
+
+ error = input_register_device(dev->input);
+ if (error)
+ goto err_free_buffer;
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(iface, dev);
+
+ return 0;
+
+err_free_buffer:
+ usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+err_free_bt_buffer:
+ usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
+err_free_urb:
+ usb_free_urb(dev->tp_urb);
+err_free_bt_urb:
+ usb_free_urb(dev->bt_urb);
+err_free_devs:
+ usb_set_intfdata(iface, NULL);
+ input_free_device(input_dev);
+ kfree(dev);
+ return error;
+}
+
+static void bcm5974_disconnect(struct usb_interface *iface)
+{
+ struct bcm5974 *dev = usb_get_intfdata(iface);
+
+ usb_set_intfdata(iface, NULL);
+
+ input_unregister_device(dev->input);
+ usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+ usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
+ usb_free_urb(dev->tp_urb);
+ usb_free_urb(dev->bt_urb);
+ kfree(dev);
+}
+
+static struct usb_driver bcm5974_driver = {
+ .name = "bcm5974",
+ .probe = bcm5974_probe,
+ .disconnect = bcm5974_disconnect,
+ .suspend = bcm5974_suspend,
+ .resume = bcm5974_resume,
+ .reset_resume = bcm5974_resume,
+ .id_table = bcm5974_table,
+ .supports_autosuspend = 1,
+};
+
+static int __init bcm5974_init(void)
+{
+ return usb_register(&bcm5974_driver);
+}
+
+static void __exit bcm5974_exit(void)
+{
+ usb_deregister(&bcm5974_driver);
+}
+
+module_init(bcm5974_init);
+module_exit(bcm5974_exit);
+
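Note: int2scale()/int2bound() above map raw readings onto the logical range [0, dim). A worked example in plain C using the first Wellspring X-axis entry from the config table (dim 1280, device range -4824..5342); it only restates arithmetic already in the driver:

#include <stdio.h>

static int clampi(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        const int dim = 1280, devmin = -4824, devmax = 5342;
        const int raw[] = { -4824, 259, 5342 }; /* left edge, midpoint, right edge */
        int i;

        for (i = 0; i < 3; i++) {
                int scaled = (raw[i] - devmin) * dim / (devmax - devmin);
                printf("raw %5d -> abs_x %4d\n", raw[i], clampi(scaled, 0, dim - 1));
        }
        return 0;       /* prints 0, 640 and 1279 */
}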
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 3392901..72cf5e3 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -9,7 +9,6 @@
*/
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 66bafe3..692a79e 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,10 +1,11 @@
#ifndef _I8042_SPARCIO_H
#define _I8042_SPARCIO_H
+#include <linux/of_device.h>
+
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
static int i8042_kbd_irq = -1;
static int i8042_aux_irq = -1;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fe732a5..3282b74 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -394,6 +394,13 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
},
+ {
+ .ident = "Acer TravelMate 4280",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 0ed044d..7650078 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -269,8 +269,8 @@ static int xps2_setup(struct device *dev, struct resource *regs_res,
* we have the PS2 in a good state */
out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);
- dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n",
- drvdata->phys_addr, (u32)drvdata->base_address, drvdata->irq);
+ dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%p, irq=%d\n",
+ drvdata->phys_addr, drvdata->base_address, drvdata->irq);
serio = &drvdata->serio;
serio->id.type = SERIO_8042;
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index b9b7a98..7df0228 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -64,7 +64,6 @@ Scott Hill shill@gtcocalcomp.com
#include <asm/byteorder.h>
-#include <linux/version.h>
#include <linux/usb/input.h>
/* Version with a Major number of 2 is for kernel inclusion only. */
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 6e60a97..25287e8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -249,29 +249,26 @@ config TOUCHSCREEN_WM97XX
config TOUCHSCREEN_WM9705
bool "WM9705 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9705
- touchscreen controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9705 touchscreen controller.
config TOUCHSCREEN_WM9712
bool "WM9712 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9712
- touchscreen controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9712 touchscreen controller.
config TOUCHSCREEN_WM9713
bool "WM9713 Touchscreen interface support"
depends on TOUCHSCREEN_WM97XX
+ default y
help
- Say Y here if you have a Wolfson Microelectronics WM9713 touchscreen
- controller connected to your system.
-
- If unsure, say N.
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9713 touchscreen controller.
config TOUCHSCREEN_WM97XX_MAINSTONE
tristate "WM97xx Mainstone accelerated touch"
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 283f93a..37a555f 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index c1cd99d..504ca11 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -173,7 +173,7 @@ static int migor_ts_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_X, 95, 955, 0, 0);
input_set_abs_params(input, ABS_Y, 85, 935, 0, 0);
- input->name = client->driver_name;
+ input->name = client->name;
input->id.bustype = BUS_I2C;
input->dev.parent = &client->dev;
@@ -192,7 +192,7 @@ static int migor_ts_probe(struct i2c_client *client,
goto err1;
error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW,
- client->driver_name, priv);
+ client->name, priv);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
goto err2;
@@ -224,12 +224,19 @@ static int migor_ts_remove(struct i2c_client *client)
return 0;
}
+static const struct i2c_device_id migor_ts_id[] = {
+ { "migor_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, migor_ts_id);
+
static struct i2c_driver migor_ts_driver = {
.driver = {
.name = "migor_ts",
},
.probe = migor_ts_probe,
.remove = migor_ts_remove,
+ .id_table = migor_ts_id,
};
static int __init migor_ts_init(void)
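Note: with the id table added above, the i2c core binds this driver by matching on the device name rather than the old driver_name field, so board code instantiates the device by name. A sketch of what that registration typically looks like (array name, address and IRQ are examples, not taken from this patch):

#include <linux/i2c.h>

static struct i2c_board_info example_i2c_devices[] __initdata = {
        {
                I2C_BOARD_INFO("migor_ts", 0x48),       /* example address */
                .irq = 6,                               /* example IRQ */
        },
};

/* registered during board setup with
 * i2c_register_board_info(0, example_i2c_devices,
 *                         ARRAY_SIZE(example_i2c_devices)); */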
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 978e1a1..372efbc 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 4c5d85a..c8bb1e7 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 8384587..781ee83 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index cdc24ad..d589ab0 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -37,7 +37,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 37344aa..a661bbd 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -98,6 +98,10 @@ static u32 lg_get_features(struct virtio_device *vdev)
return features;
}
+/* The virtio core takes the features the Host offers, and copies the
+ * ones supported by the driver into the vdev->features array. Once
+ * that's all sorted out, this routine is called so we can tell the
+ * Host which features we understand and accept. */
static void lg_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
@@ -108,6 +112,10 @@ static void lg_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* The vdev->features array is a Linux bitmask: this isn't the
+ * same as the simple array of bits used by lguest devices
+ * for features. So we do this slow, manual conversion which is
+ * completely general. */
memset(out_features, 0, desc->feature_len);
bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
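Note: the comment above describes translating Linux's feature bitmask (an array of unsigned long, tested with test_bit()) into lguest's flat byte array of feature bits. A minimal sketch of that conversion, with the function and parameter names assumed for illustration:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_features(const unsigned long *in_bits, unsigned int nbits,
                          u8 *out_features, unsigned int out_len)
{
        unsigned int i, bits = min_t(unsigned int, nbits, out_len * 8);

        memset(out_features, 0, out_len);
        for (i = 0; i < bits; i++)
                if (test_bit(i, in_bits))
                        out_features[i / 8] |= (1 << (i % 8));
}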
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d93500f..81d0c60 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -108,9 +108,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
}
/*:*/
-/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We
- * could probably try to grab batches of pages here as an optimization
- * (ie. pre-faulting). :*/
+/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
+ * an optimization (ie. pre-faulting). :*/
/*H:350 This routine takes a page number given by the Guest and converts it to
* an actual, physical page number. It can fail for several reasons: the
@@ -123,19 +122,13 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
struct page *page;
- /* This value indicates failure. */
- unsigned long ret = -1UL;
- /* get_user_pages() is a complex interface: it gets the "struct
- * vm_area_struct" and "struct page" assocated with a range of pages.
- * It also needs the task's mmap_sem held, and is not very quick.
- * It returns the number of pages it got. */
- down_read(&current->mm->mmap_sem);
- if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
- 1, write, 1, &page, NULL) == 1)
- ret = page_to_pfn(page);
- up_read(&current->mm->mmap_sem);
- return ret;
+ /* gup me one page at this address please! */
+ if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
+ return page_to_pfn(page);
+
+ /* This value indicates failure. */
+ return -1UL;
}
/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
@@ -174,7 +167,7 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
- /* Remember that get_user_pages() took a reference to the page, in
+ /* Remember that get_user_pages_fast() took a reference to the page, in
* get_pfn()? We have to put it back now. */
if (pte_flags(pte) & _PAGE_PRESENT)
put_page(pfn_to_page(pte_pfn(pte)));
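Note: get_user_pages_fast() needs neither the mm nor the mmap_sem, which is what lets the hunk above drop the down_read()/up_read() pair. A minimal sketch of the pin/translate/release pattern, using the same (start, nr_pages, write, pages) signature as the hunk:

#include <linux/mm.h>

/* Pin one user page and return its pfn, or -1UL on failure.  The
 * reference taken here must later be dropped with put_page(). */
static unsigned long pin_user_pfn(unsigned long vaddr, int write)
{
        struct page *page;

        if (get_user_pages_fast(vaddr & PAGE_MASK, 1, write, &page) == 1)
                return page_to_pfn(page);
        return -1UL;
}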
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7aae66..8cfadc5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2393,6 +2393,8 @@ static void analyze_sbs(mddev_t * mddev)
}
+static void md_safemode_timeout(unsigned long data);
+
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
@@ -2432,9 +2434,12 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
if (msec == 0)
mddev->safemode_delay = 0;
else {
+ unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
+ if (mddev->safemode_delay < old_delay)
+ md_safemode_timeout((unsigned long)mddev);
}
return len;
}
@@ -4634,6 +4639,11 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->bitmap)
+ /* Sorry, cannot grow a bitmap yet, just remove it,
+ * grow, and re-add.
+ */
+ return -EBUSY;
rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
@@ -5993,7 +6003,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded) {
+ if (mddev->degraded && ! mddev->ro) {
rdev_for_each(rdev, rtmp, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
@@ -6067,6 +6077,8 @@ void md_check_recovery(mddev_t *mddev)
flush_signals(current);
}
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
if ( ! (
(mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
@@ -6080,6 +6092,15 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (mddev->ro) {
+ /* Only thing we do on a ro array is remove
+ * failed devices.
+ */
+ remove_and_add_spares(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
@@ -6117,7 +6138,8 @@ void md_check_recovery(mddev_t *mddev)
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
@@ -6169,6 +6191,7 @@ void md_check_recovery(mddev_t *mddev)
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -6232,7 +6255,11 @@ static int md_notify_reboot(struct notifier_block *this,
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
- do_md_stop (mddev, 1, 0);
+ /* Force a switch to readonly even if the array
+ * appears to still be in use. Hence
+ * the '100'.
+ */
+ do_md_stop (mddev, 1, 100);
mddev_unlock(mddev);
}
/*
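Note: the safe_delay_store() hunk above converts a requested delay in milliseconds into jiffies, never lets a non-zero request round down to zero, and now also fires the timeout immediately when the delay shrinks. A standalone worked example of the conversion (the tick rate is chosen for illustration):

#include <stdio.h>

int main(void)
{
        const unsigned long HZ = 250;           /* example kernel tick rate */
        unsigned long msec = 200;               /* e.g. a 200 ms request */
        unsigned long delay = msec * HZ / 1000; /* 50 jiffies */

        if (delay == 0)
                delay = 1;                      /* never round down to "off" */
        printf("safemode_delay = %lu jiffies\n", delay);
        return 0;
}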
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d41bebb..e34cd0e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,11 +76,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+/* amount of memory to reserve for resync requests */
+#define RESYNC_WINDOW (1024*1024)
+/* maximum number of concurrent requests, memory permitting */
+#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
/*
* When performing a resync, we need to read and compare, so
@@ -690,7 +692,6 @@ static int flush_pending_writes(conf_t *conf)
* there is no normal IO happeing. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
static void raise_barrier(conf_t *conf, int force)
{
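Note: with RESYNC_BLOCK_SIZE fixed at 64 KiB, the reworked constants above come out as follows (assuming 4 KiB pages); this only spells out the arithmetic already encoded in the macros:

/*
 *   RESYNC_PAGES  = 64 KiB / 4 KiB   = 16 pages per resync request
 *   RESYNC_DEPTH  = 32 MiB / 64 KiB  = 512 concurrent requests (was a fixed 32)
 *   RESYNC_WINDOW = 1 MiB reserved   (down from 2 MiB before this patch)
 */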
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 40e9396..224de02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2568,10 +2568,10 @@ static bool handle_stripe5(struct stripe_head *sh)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2588,8 +2588,14 @@ static bool handle_stripe5(struct stripe_head *sh)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -2832,10 +2838,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (dev->written)
s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ if (blocked_rdev == NULL &&
+ rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
blocked_rdev = rdev;
atomic_inc(&rdev->nr_pending);
- break;
}
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -2853,9 +2859,16 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
rcu_read_unlock();
if (unlikely(blocked_rdev)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ if (s.syncing || s.expanding || s.expanded ||
+ s.to_write || s.written) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto unlock;
+ }
+ /* There is nothing for the blocked_rdev to block */
+ rdev_dec_pending(blocked_rdev, conf->mddev);
+ blocked_rdev = NULL;
}
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -4446,6 +4459,9 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL; /* Cannot shrink array or change level yet */
if (mddev->delta_disks == 0)
return 0; /* nothing to do */
+ if (mddev->bitmap)
+ /* Cannot grow a bitmap yet */
+ return -EBUSY;
/* Can only proceed if there are plenty of stripe_heads.
* We need a minimum of one full stripe,, and for sensible progress
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c6408a6..bc2a807 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -16,7 +16,6 @@
*
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/gpio.h>
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index e7a3fe5..c6c77a5 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -192,6 +192,9 @@ static struct quirk_entry *quirks;
static void set_quirks(void)
{
+ if (!interface)
+ return;
+
if (quirks->mailled)
interface->capability |= ACER_CAP_MAILLED;
@@ -803,11 +806,30 @@ static acpi_status get_u32(u32 *value, u32 cap)
static acpi_status set_u32(u32 value, u32 cap)
{
+ acpi_status status;
+
if (interface->capability & cap) {
switch (interface->type) {
case ACER_AMW0:
return AMW0_set_u32(value, cap, interface);
case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED)
+ return AMW0_set_u32(value, cap, interface);
+
+ /*
+ * On some models, some WMID methods don't toggle
+ * properly. For those cases, we want to run the AMW0
+ * method afterwards to be certain we've really toggled
+ * the device state.
+ */
+ if (cap == ACER_CAP_WIRELESS ||
+ cap == ACER_CAP_BLUETOOTH) {
+ status = WMID_set_u32(value, cap, interface);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ return AMW0_set_u32(value, cap, interface);
+ }
case ACER_WMID:
return WMID_set_u32(value, cap, interface);
default:
@@ -1218,6 +1240,8 @@ static int __init acer_wmi_init(void)
return -ENODEV;
}
+ set_quirks();
+
if (platform_driver_register(&acer_platform_driver)) {
printk(ACER_ERR "Unable to register platform driver.\n");
goto error_platform_register;
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index 9e8d79e..facdb98 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -553,9 +553,9 @@ static void eeepc_hwmon_exit(void)
hwmon = eeepc_hwmon_device;
if (!hwmon)
return ;
- hwmon_device_unregister(hwmon);
sysfs_remove_group(&hwmon->kobj,
&hwmon_attribute_group);
+ hwmon_device_unregister(hwmon);
eeepc_hwmon_device = NULL;
}
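Note: the reordering above makes teardown mirror setup in reverse: the sysfs group created on the hwmon device is removed before the device itself is unregistered. A minimal sketch of that pairing (function and variable names are illustrative, not from eeepc-laptop):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

static struct device *example_hwmon_dev;

static int example_hwmon_init(struct device *parent,
                              const struct attribute_group *grp)
{
        int err;

        example_hwmon_dev = hwmon_device_register(parent);
        if (IS_ERR(example_hwmon_dev))
                return PTR_ERR(example_hwmon_dev);

        err = sysfs_create_group(&example_hwmon_dev->kobj, grp);
        if (err)
                hwmon_device_unregister(example_hwmon_dev);
        return err;
}

static void example_hwmon_exit(const struct attribute_group *grp)
{
        /* reverse order of creation: remove the group, then the device */
        sysfs_remove_group(&example_hwmon_dev->kobj, grp);
        hwmon_device_unregister(example_hwmon_dev);
}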
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom_93cx6.c
index ea55654..15b1780 100644
--- a/drivers/misc/eeprom_93cx6.c
+++ b/drivers/misc/eeprom_93cx6.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <linux/eeprom_93cx6.h>
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 4251018..a78f70d 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -279,7 +279,7 @@ struct gru_stats_s {
#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
-#elif defined __x86_64
+#elif defined CONFIG_X86_64
#define VADDR_HI_BIT 48
#define GRUREGION(addr) (0) /* ZZZ could do better */
#else
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 7c994e1..ae16d84 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -595,8 +595,9 @@ static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id,
- int size, enum s3c2410_dma_buffresult result)
+static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
+ void *buf_id, int size,
+ enum s3c2410_dma_buffresult result)
{
struct s3cmci_host *host = buf_id;
unsigned long iflags;
@@ -740,8 +741,8 @@ request_done:
mmc_request_done(host->mmc, mrq);
}
-
-void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source)
+static void s3cmci_dma_setup(struct s3cmci_host *host,
+ enum s3c2410_dmasrc source)
{
static enum s3c2410_dmasrc last_source = -1;
static int setup_ok;
@@ -1003,8 +1004,9 @@ static void s3cmci_send_request(struct mmc_host *mmc)
enable_irq(host->irq);
}
-static int s3cmci_card_present(struct s3cmci_host *host)
+static int s3cmci_card_present(struct mmc_host *mmc)
{
+ struct s3cmci_host *host = mmc_priv(mmc);
struct s3c24xx_mci_pdata *pdata = host->pdata;
int ret;
@@ -1023,7 +1025,7 @@ static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->cmd_is_stop = 0;
host->mrq = mrq;
- if (s3cmci_card_present(host) == 0) {
+ if (s3cmci_card_present(mmc) == 0) {
dbg(host, dbg_err, "%s: no medium present\n", __func__);
host->mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
@@ -1138,6 +1140,7 @@ static struct mmc_host_ops s3cmci_ops = {
.request = s3cmci_request,
.set_ios = s3cmci_set_ios,
.get_ro = s3cmci_get_ro,
+ .get_cd = s3cmci_card_present,
};
static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1206,7 +1209,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
}
host->base = ioremap(host->mem->start, RESSIZE(host->mem));
- if (host->base == 0) {
+ if (!host->base) {
dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
ret = -EINVAL;
goto probe_free_mem_region;
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f99e9f7..1df44d9 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -29,7 +29,6 @@
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/scatterlist.h>
-#include <linux/version.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 6400248..917cf8d 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -19,7 +19,7 @@
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4b4cb2b..4a11296 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -822,14 +822,14 @@ config ULTRA32
will be called smc-ultra32.
config BFIN_MAC
- tristate "Blackfin 527/536/537 on-chip mac support"
- depends on NET_ETHERNET && (BF527 || BF537 || BF536)
+ tristate "Blackfin on-chip MAC support"
+ depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537)
select CRC32
select MII
select PHYLIB
select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
help
- This is the driver for blackfin on-chip mac device. Say Y if you want it
+ This is the driver for the Blackfin on-chip MAC device. Say Y if you want it
compiled into the kernel. This driver is also available as a module
( = code which can be inserted in and removed from the running kernel
whenever you want). The module will be called bfin_mac.
@@ -1172,7 +1172,7 @@ config ETH16I
config NE2000
tristate "NE2000/NE1000 support"
- depends on NET_ISA || (Q40 && m) || M32R || TOSHIBA_RBTX4927 || TOSHIBA_RBTX4938
+ depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX
select CRC32
---help---
If you have a network (Ethernet) card of this type, say Y and read
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index e4483de..66de80b 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -52,7 +52,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 020771b..e2d702b 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -551,7 +551,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
phys = dma_map_single(&dev->dev, skb->data,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(phys)) {
+ if (dma_mapping_error(&dev->dev, phys)) {
dev_kfree_skb(skb);
skb = NULL;
}
@@ -698,7 +698,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(phys)) {
+ if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
dev_kfree_skb(skb);
#else
@@ -883,7 +883,7 @@ static int init_queues(struct port *port)
desc->buf_len = MAX_MRU;
desc->data = dma_map_single(&port->netdev->dev, data,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(desc->data)) {
+ if (dma_mapping_error(&port->netdev->dev, desc->data)) {
free_buffer(buff);
return -EIO;
}
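[Editor's sketch, not part of the patch] The three ixp4xx_eth hunks above track the DMA API change that added a struct device argument to dma_mapping_error(). A minimal sketch of the updated calling convention on an RX buffer; the helper name and error handling are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map an RX buffer and verify it with the two-argument helper. */
static int example_map_rx_buf(struct net_device *dev, struct sk_buff *skb,
			      size_t len, dma_addr_t *phys)
{
	*phys = dma_map_single(&dev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, *phys))
		return -EIO;	/* caller frees the skb */
	return 0;
}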
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index cdc3b85..619c658 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -355,7 +355,7 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
- WAKE_MCAST | WAKE_BCAST | WAKE_MCAST))
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
adapter->wol = 0;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 82d7be1..7685b99 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2232,10 +2232,11 @@ static int atl1e_resume(struct pci_dev *pdev)
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
err = atl1e_request_irq(adapter);
if (err)
return err;
+ }
atl1e_reset_hw(&adapter->hw);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index e6a7bb7..e23ce77 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3022,7 +3022,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
- netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;
/*
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index cb8be49..5ee1b05 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -807,7 +807,7 @@ err_out:
static int au1000_init(struct net_device *dev)
{
struct au1000_private *aup = (struct au1000_private *) dev->priv;
- u32 flags;
+ unsigned long flags;
int i;
u32 control;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 0b4adf4..a886a4b 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -554,7 +554,7 @@ static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_lock_irqsave(&ax->mii_lock, flags);
mii_ethtool_gset(&ax->mii, cmd);
- spin_lock_irqsave(&ax->mii_lock, flags);
+ spin_unlock_irqrestore(&ax->mii_lock, flags);
return 0;
}
@@ -567,7 +567,7 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_lock_irqsave(&ax->mii_lock, flags);
rc = mii_ethtool_sset(&ax->mii, cmd);
- spin_lock_irqsave(&ax->mii_lock, flags);
+ spin_unlock_irqrestore(&ax->mii_lock, flags);
return rc;
}
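[Editor's sketch, not part of the patch] The ax88796 fix above replaces a duplicated spin_lock_irqsave() with the matching unlock: taking the same lock twice deadlocks, and the saved IRQ flags are never restored. The usual pairing, as a minimal sketch with an illustrative callback standing in for the MII work done under the lock:

#include <linux/spinlock.h>

/*
 * Every spin_lock_irqsave() must be matched by spin_unlock_irqrestore()
 * on the same lock with the same flags variable.
 */
static void example_locked_op(spinlock_t *lock, void (*op)(void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	op(arg);
	spin_unlock_irqrestore(lock, flags);
}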
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5ebde67..2486a65 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -35,8 +35,8 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
@@ -57,8 +57,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.7.9"
-#define DRV_MODULE_RELDATE "July 18, 2008"
+#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_RELDATE "Aug 14, 2008"
#define RUN_AT(x) (jiffies + (x))
@@ -2876,6 +2876,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u16 vtag = 0;
+ int hw_vlan __maybe_unused = 0;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
@@ -2919,7 +2921,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (len <= bp->rx_copy_thresh) {
struct sk_buff *new_skb;
- new_skb = netdev_alloc_skb(bp->dev, len + 2);
+ new_skb = netdev_alloc_skb(bp->dev, len + 6);
if (new_skb == NULL) {
bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
@@ -2928,9 +2930,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
/* aligned copy */
skb_copy_from_linear_data_offset(skb,
- BNX2_RX_OFFSET - 2,
- new_skb->data, len + 2);
- skb_reserve(new_skb, 2);
+ BNX2_RX_OFFSET - 6,
+ new_skb->data, len + 6);
+ skb_reserve(new_skb, 6);
skb_put(new_skb, len);
bnx2_reuse_rx_skb(bp, rxr, skb,
@@ -2941,6 +2943,25 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
goto next_rx;
+ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+ !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = rx_hdr->l2_fhdr_vlan_tag;
+#ifdef BCM_VLAN
+ if (bp->vlgrp)
+ hw_vlan = 1;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, 4);
+
+ memmove(ve, skb->data + 4, ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ len += 4;
+ }
+ }
+
skb->protocol = eth_type_trans(skb, bp->dev);
if ((len > (bp->dev->mtu + ETH_HLEN)) &&
@@ -2962,10 +2983,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
#ifdef BCM_VLAN
- if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- rx_hdr->l2_fhdr_vlan_tag);
- }
+ if (hw_vlan)
+ vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
else
#endif
netif_receive_skb(skb);
@@ -3237,10 +3256,10 @@ bnx2_set_rx_mode(struct net_device *dev)
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
- if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
- if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
if (dev->flags & IFF_PROMISC) {
@@ -5963,10 +5982,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
+#ifdef BCM_VLAN
if (bp->vlgrp && vlan_tx_tag_present(skb)) {
vlan_tag_flags |=
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
+#endif
if ((mss = skb_shinfo(skb)->gso_size)) {
u32 tcp_opt_len, ip_tcp_len;
struct iphdr *iph;
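[Editor's sketch, not part of the patch] The bnx2 RX-path change above re-inserts the hardware-stripped 802.1Q tag into the frame when no VLAN group is registered, instead of silently dropping the tag. A standalone sketch of that re-insertion step; it mirrors the __skb_push/memmove sequence in the hunk, with the surrounding driver state assumed:

#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/*
 * Put a stripped VLAN tag back in front of the payload: open 4 bytes of
 * headroom, slide the two MAC addresses down, then write proto and TCI.
 * __skb_push() also grows skb->len by VLAN_HLEN.
 */
static void example_reinsert_vlan_tag(struct sk_buff *skb, u16 vtag)
{
	struct vlan_ethhdr *ve =
		(struct vlan_ethhdr *)__skb_push(skb, VLAN_HLEN);

	memmove(ve, skb->data + VLAN_HLEN, ETH_ALEN * 2);
	ve->h_vlan_proto = htons(ETH_P_8021Q);
	ve->h_vlan_TCI = htons(vtag);
}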
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b..a14dba1 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
#define DP(__mask, __fmt, __args...) do { \
if (bp->msglevel & (__mask)) \
printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...) do { \
if (bp->msglevel & NETIF_MSG_PROBE) \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...) do { \
printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev?(bp->dev->name):"?", ##__args); \
+ bp->dev ? (bp->dev->name) : "?", ##__args); \
} while (0)
/* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
-#define NIG_WR(reg, val) REG_WR(bp, reg, val)
-#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val)
-#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
-
-
-#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
-
-#define for_each_nondefault_queue(bp, var) \
- for (var = 1; var < bp->num_queues; var++)
-#define is_multi(bp) (bp->num_queues > 1)
+#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
/* fast path */
@@ -163,7 +155,7 @@ struct sw_rx_page {
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
-/* RX_SGE_CNT is promissed to be a power of 2 */
+/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
unsigned long tx_pkt,
rx_pkt,
- rx_calls,
- rx_alloc_failed;
+ rx_calls;
/* TPA related */
struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define BNX2X_HAS_TX_WORK(fp) \
+ ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
+ (fp->tx_pkt_prod != fp->tx_pkt_cons))
+
+#define BNX2X_HAS_RX_WORK(fp) \
+ (fp->rx_comp_cons != rx_cons_sb)
+
+#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
-/* This is needed for determening of last_max */
+/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
#define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
(TPA_TYPE_START | TPA_TYPE_END))
-#define BNX2X_RX_SUM_OK(cqe) \
- (!(cqe->fast_path_cqe.status_flags & \
- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))
+#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+ (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
#define BNX2X_RX_SUM_FIX(cqe) \
((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
(1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
-#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
-
#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
u32 brb_drop_hi;
u32 brb_drop_lo;
+ u32 brb_truncate_hi;
+ u32 brb_truncate_lo;
u32 jabber_packets_received;
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
u32 mac_discard;
u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
};
#define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@ struct bnx2x {
u16 def_att_idx;
u32 attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
- u32 aeu_mask;
u32 nig_mask;
/* slow path ring */
@@ -772,7 +784,7 @@ struct bnx2x {
u8 stats_pending;
u8 set_mac_pending;
- /* End of fileds used in the performance code paths */
+ /* End of fields used in the performance code paths */
int panic;
int msglevel;
@@ -794,9 +806,6 @@ struct bnx2x {
#define BP_FUNC(bp) (bp->func)
#define BP_E1HVN(bp) (bp->func >> 1)
#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-/* assorted E1HVN */
-#define IS_E1HMF(bp) (bp->e1hmf != 0)
-#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
int pm_cap;
int pcie_cap;
@@ -821,6 +830,7 @@ struct bnx2x {
u32 mf_config;
u16 e1hov;
u8 e1hmf;
+#define IS_E1HMF(bp) (bp->e1hmf != 0)
u8 wol;
@@ -836,7 +846,6 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
- u32 stats_ticks;
u32 lin_cnt;
int state;
@@ -852,6 +861,7 @@ struct bnx2x {
#define BNX2X_STATE_ERROR 0xf000
int num_queues;
+#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0
@@ -902,10 +912,17 @@ struct bnx2x {
};
+#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
+
+#define for_each_nondefault_queue(bp, var) \
+ for (var = 1; var < bp->num_queues; var++)
+#define is_multi(bp) (bp->num_queues > 1)
+
+
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED_SHIFT 16
-#define BNX2X_NUM_STATS 39
+#define BNX2X_NUM_STATS 42
#define BNX2X_NUM_TESTS 8
#define BNX2X_MAC_LOOPBACK 0
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* resolution of the rate shaping timer - 100 usec */
#define RS_PERIODIC_TIMEOUT_USEC 100
/* resolution of fairness algorithm in usecs -
- coefficient for clauclating the actuall t fair */
+ coefficient for calculating the actual t fair */
#define T_FAIR_COEF 10000000
/* number of bytes in single QM arbitration cycle -
- coeffiecnt for calculating the fairness timer */
+ coefficient for calculating the fairness timer */
#define QM_ARB_BYTES 40000
#define FAIR_MEM 2
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f6..192fa98 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x7000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x7000 : 0x1000)
#define CSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \
- * 0x4)))
+ (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
+ 0x40) + (index * 0x4)))
#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100)) : (0x1900 + (function * 0x40)))
+ (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \
- * 0x100)) : (0x1908 + (function * 0x40)))
+ (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
+ ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
#define CSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x11e8 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
#define CSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
+ (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \
+ (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
(function * 0x8)))
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
- (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff)
+ (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0xa000 : 0x1000)
+ (IS_E1H_OFFSET ? 0xa000 : 0x1000)
#define TSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
- (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \
- (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
+ (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
+ : (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+ 0x28) + (index * 0x4)))
#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1400 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1408 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \
+ (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
(function * 0x8)))
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \
+ (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
(function * 0x38)))
#define TSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
#define TSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+ (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
- (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \
+ (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
(function * 0x80)))
#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
- (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \
+ (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
(function * 0x38)))
+#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+ (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
+ 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
#define TSTORM_RX_PRODS_OFFSET(port, client_id) \
- (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \
- (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
+ (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
+ : (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
#define TSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \
+ (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
(function * 0x8)))
-#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20)
-#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10)
-#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200)
+#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
+#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
+#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
#define USTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x8000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x8000 : 0x1000)
#define USTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
+ (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
(0x5450 + (port * 0x1c8) + (clientId * 0x18)))
#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
+ 0x28) + (index * 0x4)))
#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1900 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1908 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
#define USTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x2448 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
#define USTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
+ (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
- (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
+ (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
(0x5448 + (port * 0x1c8) + (clientId * 0x18)))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \
+ (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
(function * 0x8)))
#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
- (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
+ (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
(index * 0x4)))
#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1400 + (port * 0x280) + (cpu_id * 0x28)))
#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
- (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
+ (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x1408 + (port * 0x280) + (cpu_id * 0x28)))
#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
- (IS_E1H_OFFSET? 0x9000 : 0x1000)
+ (IS_E1H_OFFSET ? 0x9000 : 0x1000)
#define XSTORM_ASSERT_LIST_OFFSET(idx) \
- (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
+ (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
- (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
+ (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
- (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \
- 0x4)))
+ (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
+ 0x28) + (index * 0x4)))
#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1400 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
- (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \
- * 0xa0)) : (0x1408 + (function * 0x28)))
+ (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
+ ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
#define XSTORM_E1HOV_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff)
+ (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \
+ (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
(function * 0x8)))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \
+ (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
(function * 0x70)))
#define XSTORM_FUNCTION_MODE_OFFSET \
- (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff)
+ (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
#define XSTORM_HC_BTR_OFFSET(port) \
- (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+ (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
+#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
+ (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
+ 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \
+ (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
(function * 0x70)))
#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \
+ (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
(function * 0x10)))
#define XSTORM_SPQ_PROD_OFFSET(function) \
- (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \
+ (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
(function * 0x10)))
#define XSTORM_STATS_FLAGS_OFFSET(function) \
- (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \
+ (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
(function * 0x8)))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198..efd7644 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@ struct doorbell {
/*
- * IGU driver acknowlegement register
+ * IGU driver acknowledgement register
*/
struct igu_ack_register {
#if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
};
/*
- * structure for easy accessability to assembler
+ * structure for easy accessibility to assembler
*/
struct eth_tx_bd_flags {
u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {
/*
- * ethernet doorbell
+ * Ethernet doorbell
*/
struct eth_tx_doorbell {
#if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
};
/*
- * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits)
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
*/
union eth_ramrod_data {
struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
};
/*
- * ethernet slow path element
+ * Ethernet slow path element
*/
union eth_specific_data {
u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
};
/*
- * ethernet slow path element
+ * Ethernet slow path element
*/
struct eth_spe {
struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
/*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
*/
struct tstorm_eth_tpa_exist {
#if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
};
/*
- * Eth statistics query sturcture for the eth_stats_quesry ramrod
+ * Eth statistics query structure for the eth_stats_query ramrod
*/
struct eth_stats_query {
struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c77507..130927cf 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
struct raw_op {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 raw_data;
};
struct op_read {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 pad;
};
struct op_write {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 val;
};
struct op_string_write {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
#ifdef __LITTLE_ENDIAN
u16 data_off;
u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
};
struct op_zero {
- u32 op :8;
- u32 offset :24;
+ u32 op:8;
+ u32 offset:24;
u32 len;
};
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
/*********************************************************
There are different blobs for each PRAM section.
In addition, each blob write operation is divided into a few operations
- in order to decrease the amount of phys. contigious buffer needed.
+ in order to decrease the amount of phys. contiguous buffer needed.
Thus, when we select a blob the address may be with some offset
from the beginning of PRAM section.
The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
len = op->str_wr.data_len;
data = data_base + op->str_wr.data_off;
- /* carefull! it must be in order */
+ /* careful! it must be in order */
if (unlikely(op_type > OP_WB)) {
/* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
return crc_res;
}
-/* regiesers addresses are not in order
+/* registers addresses are not in order
so these arrays help simplify the code */
static const int cm_start[E1H_FUNC_MAX][9] = {
{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 6301905..9755bf6 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
{OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e},
+ {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
{OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
{OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
{OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
- {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e},
+ {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
{OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
{OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
{OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = {
{OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
{OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
{OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
- {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0},
+ {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
{OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
{OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
-#define USEM_COMMON_END 790
-#define USEM_PORT0_START 790
+#define USEM_COMMON_END 787
+#define USEM_PORT0_START 787
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
-#define USEM_PORT0_END 838
-#define USEM_PORT1_START 838
+#define USEM_PORT0_END 818
+#define USEM_PORT1_START 818
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
{OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20},
+ {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
- {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
-#define USEM_PORT1_END 886
-#define USEM_FUNC0_START 886
+#define USEM_PORT1_END 849
+#define USEM_FUNC0_START 849
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
-#define USEM_FUNC0_END 888
-#define USEM_FUNC1_START 888
+#define USEM_FUNC0_END 851
+#define USEM_FUNC1_START 851
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
-#define USEM_FUNC1_END 890
-#define USEM_FUNC2_START 890
+#define USEM_FUNC1_END 853
+#define USEM_FUNC2_START 853
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
-#define USEM_FUNC2_END 892
-#define USEM_FUNC3_START 892
+#define USEM_FUNC2_END 855
+#define USEM_FUNC3_START 855
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
-#define USEM_FUNC3_END 894
-#define USEM_FUNC4_START 894
+#define USEM_FUNC3_END 857
+#define USEM_FUNC4_START 857
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
-#define USEM_FUNC4_END 896
-#define USEM_FUNC5_START 896
+#define USEM_FUNC4_END 859
+#define USEM_FUNC5_START 859
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
-#define USEM_FUNC5_END 898
-#define USEM_FUNC6_START 898
+#define USEM_FUNC5_END 861
+#define USEM_FUNC6_START 861
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
-#define USEM_FUNC6_END 900
-#define USEM_FUNC7_START 900
+#define USEM_FUNC6_END 863
+#define USEM_FUNC7_START 863
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
{OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
-#define USEM_FUNC7_END 902
-#define CSEM_COMMON_START 902
+#define USEM_FUNC7_END 865
+#define CSEM_COMMON_START 865
{OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
{OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
{OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
{OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba},
+ {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
{OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
{OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
{OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
- {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca},
+ {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
{OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
{OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
{OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
{OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
{OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
- {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc},
+ {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
{OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
-#define CSEM_COMMON_END 981
-#define CSEM_PORT0_START 981
+#define CSEM_COMMON_END 944
+#define CSEM_PORT0_START 944
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
-#define CSEM_PORT0_END 993
-#define CSEM_PORT1_START 993
+#define CSEM_PORT0_END 956
+#define CSEM_PORT1_START 956
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
{OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
-#define CSEM_PORT1_END 1005
-#define CSEM_FUNC0_START 1005
+#define CSEM_PORT1_END 968
+#define CSEM_FUNC0_START 968
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
-#define CSEM_FUNC0_END 1007
-#define CSEM_FUNC1_START 1007
+#define CSEM_FUNC0_END 970
+#define CSEM_FUNC1_START 970
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
-#define CSEM_FUNC1_END 1009
-#define CSEM_FUNC2_START 1009
+#define CSEM_FUNC1_END 972
+#define CSEM_FUNC2_START 972
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
-#define CSEM_FUNC2_END 1011
-#define CSEM_FUNC3_START 1011
+#define CSEM_FUNC2_END 974
+#define CSEM_FUNC3_START 974
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
-#define CSEM_FUNC3_END 1013
-#define CSEM_FUNC4_START 1013
+#define CSEM_FUNC3_END 976
+#define CSEM_FUNC4_START 976
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
-#define CSEM_FUNC4_END 1015
-#define CSEM_FUNC5_START 1015
+#define CSEM_FUNC4_END 978
+#define CSEM_FUNC5_START 978
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
-#define CSEM_FUNC5_END 1017
-#define CSEM_FUNC6_START 1017
+#define CSEM_FUNC5_END 980
+#define CSEM_FUNC6_START 980
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
-#define CSEM_FUNC6_END 1019
-#define CSEM_FUNC7_START 1019
+#define CSEM_FUNC6_END 982
+#define CSEM_FUNC7_START 982
{OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
{OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
-#define CSEM_FUNC7_END 1021
-#define XPB_COMMON_START 1021
+#define CSEM_FUNC7_END 984
+#define XPB_COMMON_START 984
{OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
-#define XPB_COMMON_END 1022
-#define DQ_COMMON_START 1022
+#define XPB_COMMON_END 985
+#define DQ_COMMON_START 985
{OP_WR, DORQ_REG_MODE_ACT, 0x2},
{OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
{OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = {
{OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
{OP_WR, DORQ_REG_REGN, 0x7c1004},
{OP_WR, DORQ_REG_IF_EN, 0xf},
-#define DQ_COMMON_END 1040
-#define TIMERS_COMMON_START 1040
+#define DQ_COMMON_END 1003
+#define TIMERS_COMMON_START 1003
{OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
{OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
{OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = {
{OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
{OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
{OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
-#define TIMERS_COMMON_END 1062
-#define TIMERS_PORT0_START 1062
+#define TIMERS_COMMON_END 1025
+#define TIMERS_PORT0_START 1025
{OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
-#define TIMERS_PORT0_END 1063
-#define TIMERS_PORT1_START 1063
+#define TIMERS_PORT0_END 1026
+#define TIMERS_PORT1_START 1026
{OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
-#define TIMERS_PORT1_END 1064
-#define XSDM_COMMON_START 1064
+#define TIMERS_PORT1_END 1027
+#define XSDM_COMMON_START 1027
{OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
{OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
{OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
{OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
{OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
-#define XSDM_COMMON_END 1111
-#define QM_COMMON_START 1111
+#define XSDM_COMMON_END 1074
+#define QM_COMMON_START 1074
{OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
{OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
{OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
{OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
{OP_WR, QM_REG_CMINTEN, 0xff},
-#define QM_COMMON_END 1411
-#define PBF_COMMON_START 1411
+#define QM_COMMON_END 1374
+#define PBF_COMMON_START 1374
{OP_WR, PBF_REG_INIT, 0x1},
{OP_WR, PBF_REG_INIT_P4, 0x1},
{OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = {
{OP_WR, PBF_REG_INIT_P4, 0x0},
{OP_WR, PBF_REG_INIT, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
-#define PBF_COMMON_END 1418
-#define PBF_PORT0_START 1418
+#define PBF_COMMON_END 1381
+#define PBF_PORT0_START 1381
{OP_WR, PBF_REG_INIT_P0, 0x1},
{OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
{OP_WR, PBF_REG_INIT_P0, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
-#define PBF_PORT0_END 1422
-#define PBF_PORT1_START 1422
+#define PBF_PORT0_END 1385
+#define PBF_PORT1_START 1385
{OP_WR, PBF_REG_INIT_P1, 0x1},
{OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
{OP_WR, PBF_REG_INIT_P1, 0x0},
{OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
-#define PBF_PORT1_END 1426
-#define XCM_COMMON_START 1426
+#define PBF_PORT1_END 1389
+#define XCM_COMMON_START 1389
{OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
{OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
{OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
{OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
{OP_ZR, XCM_REG_XX_TABLE, 0x12},
- {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce},
+ {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
{OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
{OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
{OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = {
{OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
{OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
{OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
-#define XCM_COMMON_END 1490
-#define XCM_PORT0_START 1490
+#define XCM_COMMON_END 1453
+#define XCM_PORT0_START 1453
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
-#define XCM_PORT0_END 1498
-#define XCM_PORT1_START 1498
+#define XCM_PORT0_END 1461
+#define XCM_PORT1_START 1461
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
-#define XCM_PORT1_END 1506
-#define XCM_FUNC0_START 1506
+#define XCM_PORT1_END 1469
+#define XCM_FUNC0_START 1469
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC0_END 1515
-#define XCM_FUNC1_START 1515
+#define XCM_FUNC0_END 1478
+#define XCM_FUNC1_START 1478
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC1_END 1524
-#define XCM_FUNC2_START 1524
+#define XCM_FUNC1_END 1487
+#define XCM_FUNC2_START 1487
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC2_END 1533
-#define XCM_FUNC3_START 1533
+#define XCM_FUNC2_END 1496
+#define XCM_FUNC3_START 1496
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC3_END 1542
-#define XCM_FUNC4_START 1542
+#define XCM_FUNC3_END 1505
+#define XCM_FUNC4_START 1505
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC4_END 1551
-#define XCM_FUNC5_START 1551
+#define XCM_FUNC4_END 1514
+#define XCM_FUNC5_START 1514
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC5_END 1560
-#define XCM_FUNC6_START 1560
+#define XCM_FUNC5_END 1523
+#define XCM_FUNC6_START 1523
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
-#define XCM_FUNC6_END 1569
-#define XCM_FUNC7_START 1569
+#define XCM_FUNC6_END 1532
+#define XCM_FUNC7_START 1532
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
{OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
{OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
{OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
{OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
-#define XCM_FUNC7_END 1578
-#define XSEM_COMMON_START 1578
+#define XCM_FUNC7_END 1541
+#define XSEM_COMMON_START 1541
{OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
{OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
{OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
{OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
{OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
{OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
- {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317},
+ {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
{OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
{OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
{OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = {
{OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
{OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
{OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
- {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319},
+ {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
{OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
-#define XSEM_COMMON_END 1688
-#define XSEM_PORT0_START 1688
+#define XSEM_COMMON_END 1651
+#define XSEM_PORT0_START 1651
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
-#define XSEM_PORT0_END 1720
-#define XSEM_PORT1_START 1720
+#define XSEM_PORT0_END 1683
+#define XSEM_PORT1_START 1683
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = {
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
{OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
- {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d},
+ {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
{OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
-#define XSEM_PORT1_END 1752
-#define XSEM_FUNC0_START 1752
+#define XSEM_PORT1_END 1715
+#define XSEM_FUNC0_START 1715
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
-#define XSEM_FUNC0_END 1755
-#define XSEM_FUNC1_START 1755
+#define XSEM_FUNC0_END 1718
+#define XSEM_FUNC1_START 1718
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
-#define XSEM_FUNC1_END 1758
-#define XSEM_FUNC2_START 1758
+#define XSEM_FUNC1_END 1721
+#define XSEM_FUNC2_START 1721
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
-#define XSEM_FUNC2_END 1761
-#define XSEM_FUNC3_START 1761
+#define XSEM_FUNC2_END 1724
+#define XSEM_FUNC3_START 1724
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
-#define XSEM_FUNC3_END 1764
-#define XSEM_FUNC4_START 1764
+#define XSEM_FUNC3_END 1727
+#define XSEM_FUNC4_START 1727
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
-#define XSEM_FUNC4_END 1767
-#define XSEM_FUNC5_START 1767
+#define XSEM_FUNC4_END 1730
+#define XSEM_FUNC5_START 1730
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
-#define XSEM_FUNC5_END 1770
-#define XSEM_FUNC6_START 1770
+#define XSEM_FUNC5_END 1733
+#define XSEM_FUNC6_START 1733
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
-#define XSEM_FUNC6_END 1773
-#define XSEM_FUNC7_START 1773
+#define XSEM_FUNC6_END 1736
+#define XSEM_FUNC7_START 1736
{OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
{OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
{OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
-#define XSEM_FUNC7_END 1776
-#define CDU_COMMON_START 1776
+#define XSEM_FUNC7_END 1739
+#define CDU_COMMON_START 1739
{OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
{OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
{OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
{OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
- {OP_WB_E1, CDU_REG_L1TT, 0x200033f},
+ {OP_WB_E1, CDU_REG_L1TT, 0x200033d},
{OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
- {OP_WB_E1, CDU_REG_MATT, 0x20053f},
+ {OP_WB_E1, CDU_REG_MATT, 0x20053d},
{OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
{OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
- {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f},
+ {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
{OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
-#define CDU_COMMON_END 1787
-#define DMAE_COMMON_START 1787
+#define CDU_COMMON_END 1750
+#define DMAE_COMMON_START 1750
{OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
{OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
{OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
{OP_WR, DMAE_REG_PCI_IFEN, 0x1},
{OP_WR, DMAE_REG_GRC_IFEN, 0x1},
-#define DMAE_COMMON_END 1794
-#define PXP_COMMON_START 1794
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565},
+#define DMAE_COMMON_END 1757
+#define PXP_COMMON_START 1757
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
{OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a},
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
{OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
- {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f},
-#define PXP_COMMON_END 1799
-#define CFC_COMMON_START 1799
+ {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
+#define PXP_COMMON_END 1762
+#define CFC_COMMON_START 1762
{OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
{OP_WR, CFC_REG_CONTROL0, 0x10},
{OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
{OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
-#define CFC_COMMON_END 1803
-#define HC_COMMON_START 1803
+#define CFC_COMMON_END 1766
+#define HC_COMMON_START 1766
{OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
-#define HC_COMMON_END 1804
-#define HC_PORT0_START 1804
+#define HC_COMMON_END 1767
+#define HC_PORT0_START 1767
{OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
{OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
{OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_PORT0_END 1822
-#define HC_PORT1_START 1822
+#define HC_PORT0_END 1785
+#define HC_PORT1_START 1785
{OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
{OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
{OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_PORT1_END 1840
-#define HC_FUNC0_START 1840
+#define HC_PORT1_END 1803
+#define HC_FUNC0_START 1803
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC0_END 1855
-#define HC_FUNC1_START 1855
+#define HC_FUNC0_END 1818
+#define HC_FUNC1_START 1818
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC1_END 1870
-#define HC_FUNC2_START 1870
+#define HC_FUNC1_END 1833
+#define HC_FUNC2_START 1833
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC2_END 1885
-#define HC_FUNC3_START 1885
+#define HC_FUNC2_END 1848
+#define HC_FUNC3_START 1848
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC3_END 1900
-#define HC_FUNC4_START 1900
+#define HC_FUNC3_END 1863
+#define HC_FUNC4_START 1863
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC4_END 1915
-#define HC_FUNC5_START 1915
+#define HC_FUNC4_END 1878
+#define HC_FUNC5_START 1878
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC5_END 1930
-#define HC_FUNC6_START 1930
+#define HC_FUNC5_END 1893
+#define HC_FUNC6_START 1893
{OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
{OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
-#define HC_FUNC6_END 1945
-#define HC_FUNC7_START 1945
+#define HC_FUNC6_END 1908
+#define HC_FUNC7_START 1908
{OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
{OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
{OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = {
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
{OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
-#define HC_FUNC7_END 1960
-#define PXP2_COMMON_START 1960
+#define HC_FUNC7_END 1923
+#define PXP2_COMMON_START 1923
{OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
{OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
{OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
{OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
{OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
-#define PXP2_COMMON_END 2077
-#define MISC_AEU_COMMON_START 2077
+#define PXP2_COMMON_END 2040
+#define MISC_AEU_COMMON_START 2040
{OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
{OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
{OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
-#define MISC_AEU_COMMON_END 2096
-#define MISC_AEU_PORT0_START 2096
+#define MISC_AEU_COMMON_END 2059
+#define MISC_AEU_PORT0_START 2059
{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
-#define MISC_AEU_PORT0_END 2128
-#define MISC_AEU_PORT1_START 2128
+#define MISC_AEU_PORT0_END 2091
+#define MISC_AEU_PORT1_START 2091
{OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
{OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
{OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = {
{OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
{OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
{OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
-#define MISC_AEU_PORT1_END 2160
+#define MISC_AEU_PORT1_END 2123
};
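The closing brace above ends the init_ops[] table for this file. A table like this is normally walked by a small interpreter at chip bring-up, which dispatches on the op type (plain write, zero-fill, copy from a companion data blob, and so on); the _E1/_E1H suffixes additionally select which chip revision an entry applies to, which the sketch omits. The following self-contained sketch only illustrates that table-driven pattern — the op names, struct layout and the reg_wr() accessor are assumptions made for illustration, not the driver's real types or helpers.

#include <stdint.h>
#include <stddef.h>

enum sketch_op {
    SKETCH_OP_WR,   /* write one immediate value            */
    SKETCH_OP_ZR,   /* zero-fill a run of 32-bit words      */
    SKETCH_OP_SW,   /* copy a run of words from a data blob */
};

struct sketch_entry {
    enum sketch_op op;
    uint32_t offset;    /* byte offset into the device BAR              */
    uint32_t param;     /* immediate, fill count, or len<<16 | data idx */
};

/* Stand-in for a real MMIO write; assumed for the sketch. */
static void reg_wr(volatile uint32_t *bar, uint32_t off, uint32_t val)
{
    bar[off / 4] = val;
}

static void run_sketch_ops(volatile uint32_t *bar,
                           const struct sketch_entry *ops,
                           size_t start, size_t end, const uint32_t *data)
{
    for (size_t i = start; i < end; i++) {
        uint32_t len, idx, k;

        switch (ops[i].op) {
        case SKETCH_OP_WR:      /* single register write */
            reg_wr(bar, ops[i].offset, ops[i].param);
            break;
        case SKETCH_OP_ZR:      /* zero 'param' consecutive words */
            for (k = 0; k < ops[i].param; k++)
                reg_wr(bar, ops[i].offset + 4 * k, 0);
            break;
        case SKETCH_OP_SW:      /* copy a run from the data blob */
            len = ops[i].param >> 16;
            idx = ops[i].param & 0xffff;
            for (k = 0; k < len; k++)
                reg_wr(bar, ops[i].offset + 4 * k, data[idx + k]);
            break;
        }
    }
}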
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = {
0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
- 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000,
- 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+ 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
- 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080,
- 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380,
- 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680,
- 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980,
- 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80,
- 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000,
- 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201,
- 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
+ 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
+ 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
+ 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
+ 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
+ 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
+ 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
+ 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
+ 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
- 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000,
- 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff,
+ 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
+ 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
+ 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
- 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
- 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
- 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
- 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
- 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
- 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
+ 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
+ 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
- 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+ 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
- 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
+ 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
- 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
- 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
- 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
- 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c,
+ 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
- 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
- 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
- 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
- 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
- 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
- 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
+ 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
+ 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
+ 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
+ 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
+ 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
+ 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
+ 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
+ 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
+ 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = {
0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
- 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000,
- 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280,
- 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0,
- 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170,
- 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000,
- 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298,
- 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110,
- 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000,
- 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc,
- 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
+ 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
+ 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
+ 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
+ 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
+ 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
+ 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
+ 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
+ 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
+ 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
+ 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
+ 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
+ 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
+ 0xcccccccc, 0x00002000
};
static const u32 init_data_e1h[] = {
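Before the next file, a note on the constant churn above: the section markers that index into init_ops[] (XSEM_COMMON_END onwards) all drop by 37, matching 37 entries removed earlier in the table, and the parameters of the E1 OP_SW/OP_WB entries all drop by 2 (0x202f3 -> 0x202f1, 0x1002f7 -> 0x1002f5, and so on), which lines up with the pair of words (0x00000028, 0x00000000) dropped near the head of init_data_e1[]. That pattern suggests — though the header defining it is not shown in this patch — that the low 16 bits of such a parameter index into the companion data array while the high bits carry a word count. A one-liner expressing that assumed decoding:

#include <stdint.h>

/* Assumed split of an OP_SW/OP_WB parameter, inferred from the uniform -2
 * shift in the hunks above (e.g. 0x202f3 -> 0x202f1): high 16 bits = word
 * count, low 16 bits = index into the companion data array. */
static inline void decode_sw_param(uint32_t param, uint32_t *len, uint32_t *idx)
{
    *len = param >> 16;
    *idx = param & 0xffff;
}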
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743d..4ce7fe9 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -21,7 +21,6 @@
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
-#include <linux/version.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
@@ -31,17 +30,16 @@
/********************************************************/
#define SUPPORT_CL73 0 /* Currently no */
-#define ETH_HLEN 14
+#define ETH_HLEN 14
#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
#define BMAC_CONTROL_RX_ENABLE 2
-#define MAX_MTU_SIZE 5000
/***********************************************************/
-/* Shortcut definitions */
+/* Shortcut definitions */
/***********************************************************/
#define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +78,12 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
-#define AUTONEG_PARALLEL \
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
-#define AUTONEG_SGMII_FIBER_AUTODET \
+#define AUTONEG_SGMII_FIBER_AUTODET \
SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
-#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +200,10 @@ static void bnx2x_emac_init(struct link_params *params,
/* init emac - use read-modify-write */
/* self clear reset */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
timeout = 200;
- do
- {
+ do {
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
if (!timeout) {
@@ -214,18 +211,18 @@ static void bnx2x_emac_init(struct link_params *params,
return;
}
timeout--;
- }while (val & EMAC_MODE_RESET);
+ } while (val & EMAC_MODE_RESET);
/* Set mac address */
val = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
val = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
}
static u8 bnx2x_emac_enable(struct link_params *params,
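The bnx2x_emac_init hunk above mainly threads the bp pointer through EMAC_WR() and tidies the wait for the self-clearing reset bit into a bounded do/while poll. As a generic, self-contained illustration of that set-then-poll-with-a-budget pattern — the register accessors and the MODE_RESET bit below are placeholders, not the driver's API:

#include <stdint.h>
#include <stdbool.h>

#define MODE_RESET (1u << 0)    /* hypothetical self-clearing reset bit */

/* Stand-ins for MMIO accessors; assumed for the sketch. */
static uint32_t reg_rd(volatile uint32_t *reg) { return *reg; }
static void reg_wr(volatile uint32_t *reg, uint32_t val) { *reg = val; }

/* Returns true once the device clears the reset bit within 'budget' polls. */
static bool reset_and_wait(volatile uint32_t *mode_reg, int budget)
{
    uint32_t val = reg_rd(mode_reg);

    reg_wr(mode_reg, val | MODE_RESET);     /* request the self-clearing reset */

    do {
        val = reg_rd(mode_reg);
        if (!budget--)
            return false;                   /* give up rather than spin forever */
        /* a real driver would sleep or delay between polls */
    } while (val & MODE_RESET);

    return true;
}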
@@ -286,7 +283,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(EMAC_REG_EMAC_MODE,
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE,
(val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
@@ -298,17 +295,19 @@ static u8 bnx2x_emac_enable(struct link_params *params,
EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_EXT_PAUSE_EN);
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
if (vars->flow_ctrl & FLOW_CTRL_TX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_EXT_PAUSE_EN);
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
}
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
- EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
/* Set Loopback */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +315,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
val |= 0x810;
else
val &= ~0x810;
- EMAC_WR(EMAC_REG_EMAC_MODE, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
/* enable emac for jumbo packets */
- EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
@@ -591,9 +590,9 @@ void bnx2x_link_status_update(struct link_params *params,
vars->flow_ctrl &= ~FLOW_CTRL_RX;
if (vars->phy_flags & PHY_XGXS_FLAG) {
- if (params->req_line_speed &&
- ((params->req_line_speed == SPEED_10) ||
- (params->req_line_speed == SPEED_100))) {
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
vars->phy_flags |= PHY_SGMII_FLAG;
} else {
vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +644,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
- u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
/* Only if the bmac is out of reset */
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +669,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u8 port = params->port;
u32 init_crd, crd;
u32 count = 1000;
- u32 pause = 0;
/* disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +691,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return -EINVAL;
}
- if (flow_ctrl & FLOW_CTRL_RX)
- pause = 1;
- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
- if (pause) {
+ if (flow_ctrl & FLOW_CTRL_RX ||
+ line_speed == SPEED_10 ||
+ line_speed == SPEED_100 ||
+ line_speed == SPEED_1000 ||
+ line_speed == SPEED_2500) {
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
-
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
/* update init credit */
switch (line_speed) {
- case SPEED_10:
- case SPEED_100:
- case SPEED_1000:
- init_crd = thresh + 55 - 22;
- break;
-
- case SPEED_2500:
- init_crd = thresh + 138 - 22;
- break;
-
case SPEED_10000:
init_crd = thresh + 553 - 22;
break;
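For the non-pause branch of the bnx2x_pbf_update hunk above, the numbers follow from constants defined earlier in this file (ETH_HLEN 14, ETH_OVREHEAD = 14 + 8 = 22, ETH_MAX_JUMBO_PACKET_SIZE 9600): the arbiter threshold is (9600 + 22)/16 = 601 and the 10G init credit is 601 + 553 - 22 = 1132, while the pause branch pins the credit at 778 (the "800-18-4" in the comment). A tiny standalone check of that arithmetic, with names local to this sketch:

#include <stdio.h>

int main(void)
{
    const unsigned jumbo = 9600;        /* ETH_MAX_JUMBO_PACKET_SIZE */
    const unsigned overhead = 14 + 8;   /* ETH_OVREHEAD = ETH_HLEN + 8 */
    unsigned thresh = (jumbo + overhead) / 16;      /* 601 */
    unsigned init_crd_10g = thresh + 553 - 22;      /* 1132 */

    printf("thresh=%u init_crd(10G)=%u pause init_crd=%u\n",
           thresh, init_crd_10g, 800 - 18 - 4);     /* pause branch: 778 */
    return 0;
}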
@@ -764,10 +754,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
emac_base = GRCBASE_EMAC0;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1;
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
break;
default:
- emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0;
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
break;
}
return emac_base;
@@ -1044,7 +1034,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
}
static void bnx2x_set_parallel_detection(struct link_params *params,
- u8 phy_flags)
+ u8 phy_flags)
{
struct bnx2x *bp = params->bp;
u16 control2;
@@ -1114,7 +1104,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
- if (params->req_line_speed == SPEED_AUTO_NEG)
+ if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
else /* CL37 Autoneg Disabled */
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1122,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
- if (params->req_line_speed == SPEED_AUTO_NEG)
+ if (vars->line_speed == SPEED_AUTO_NEG)
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1138,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
- if (params->req_line_speed == SPEED_AUTO_NEG) {
+ if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1154,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val);
/* Enable Clause 73 Aneg */
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+ if ((vars->line_speed == SPEED_AUTO_NEG) &&
(SUPPORT_CL73)) {
/* Enable BAM Station Manager */
@@ -1226,7 +1216,8 @@ static void bnx2x_set_autoneg(struct link_params *params,
}
/* program SerDes, forced speed */
-static void bnx2x_program_serdes(struct link_params *params)
+static void bnx2x_program_serdes(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
@@ -1248,28 +1239,35 @@ static void bnx2x_program_serdes(struct link_params *params)
/* program speed
- needed only if the speed is greater than 1G (2.5G or 10G) */
- if (!((params->req_line_speed == SPEED_1000) ||
- (params->req_line_speed == SPEED_100) ||
- (params->req_line_speed == SPEED_10))) {
- CL45_RD_OVER_CL22(bp, params->port,
+ CL45_RD_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
- reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
+ /* clearing the speed value before setting the right speed */
+ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == SPEED_1000) ||
+ (vars->line_speed == SPEED_100) ||
+ (vars->line_speed == SPEED_10))) {
+
reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
- if (params->req_line_speed == SPEED_10000)
+ if (vars->line_speed == SPEED_10000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
- if (params->req_line_speed == SPEED_13000)
+ if (vars->line_speed == SPEED_13000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
- CL45_WR_OVER_CL22(bp, params->port,
+ }
+
+ CL45_WR_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, reg_val);
- }
+
}
static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
@@ -1295,48 +1293,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
MDIO_OVER_1G_UP3, 0);
}
-static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
- u32 *ieee_fc)
+static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
{
- struct bnx2x *bp = params->bp;
- /* for AN, we are always publishing full duplex */
- u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
/* resolve pause mode and advertisement
* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
switch (params->req_flow_ctrl) {
case FLOW_CTRL_AUTO:
- if (params->mtu <= MAX_MTU_SIZE) {
- an_adv |=
+ if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
} else {
- an_adv |=
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
}
break;
case FLOW_CTRL_TX:
- an_adv |=
+ *ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case FLOW_CTRL_RX:
case FLOW_CTRL_BOTH:
- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
case FLOW_CTRL_NONE:
default:
- an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
break;
}
+}
- *ieee_fc = an_adv;
+static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
+ u32 ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ /* for AN, we are always publishing full duplex */
CL45_WR_OVER_CL22(bp, params->port,
params->phy_addr,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
}
static void bnx2x_restart_autoneg(struct link_params *params)
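The split above separates computing the IEEE flow-control advertisement (bnx2x_calc_ieee_aneg_adv) from writing it to the PHY (bnx2x_set_ieee_aneg_advertisment). The mapping itself is small: always advertise full duplex, then pick the PAUSE/ASM_DIR combination from the requested flow-control mode, with the AUTO case deferring to req_fc_auto_adv. A self-contained sketch of that mapping follows; the enum values and bit positions are placeholders standing in for the MDIO_COMBO_IEEE0_* definitions, which are not shown in this patch.

#include <stdint.h>

enum fc_req { REQ_AUTO, REQ_TX, REQ_RX, REQ_BOTH, REQ_NONE };

#define ADV_FULL_DUPLEX (1u << 5)   /* placeholder bit positions */
#define ADV_PAUSE_SYM   (1u << 7)
#define ADV_PAUSE_ASYM  (1u << 8)
#define ADV_PAUSE_BOTH  (ADV_PAUSE_SYM | ADV_PAUSE_ASYM)

static uint32_t calc_aneg_adv(enum fc_req req, enum fc_req auto_adv)
{
    uint32_t adv = ADV_FULL_DUPLEX;     /* full duplex is always advertised */

    switch (req) {
    case REQ_AUTO:
        adv |= (auto_adv == REQ_BOTH) ? ADV_PAUSE_BOTH : ADV_PAUSE_ASYM;
        break;
    case REQ_TX:
        adv |= ADV_PAUSE_ASYM;          /* asymmetric: send PAUSE, don't honour it */
        break;
    case REQ_RX:
    case REQ_BOTH:
        adv |= ADV_PAUSE_BOTH;
        break;
    case REQ_NONE:
    default:
        break;                          /* advertise no PAUSE capability */
    }
    return adv;
}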
@@ -1382,7 +1381,8 @@ static void bnx2x_restart_autoneg(struct link_params *params)
}
}
-static void bnx2x_initialize_sgmii_process(struct link_params *params)
+static void bnx2x_initialize_sgmii_process(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
@@ -1406,7 +1406,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
control1);
/* if forced speed */
- if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
+ if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
@@ -1419,7 +1419,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
- switch (params->req_line_speed) {
+ switch (vars->line_speed) {
case SPEED_100:
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1433,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
break;
default:
/* invalid speed for SGMII */
- DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
- params->req_line_speed);
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
break;
}
@@ -1460,20 +1460,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
*/
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = FLOW_CTRL_BOTH;
break;
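The reformatted comment columns above make the nibble layout of pause_result explicit: from most to least significant bit, local ASM_DIR, local PAUSE, link-partner ASM_DIR, link-partner PAUSE, resolved per Table 28B-3 of 802.3. A standalone sketch of assembling and resolving that nibble — the enum and macro names are illustrative, not the driver's:

#include <stdint.h>

enum flow_ctrl { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* Bit layout matching the "LD LP / ASYM P ASYM P" comment columns above. */
#define LD_ASYM  (1u << 3)
#define LD_PAUSE (1u << 2)
#define LP_ASYM  (1u << 1)
#define LP_PAUSE (1u << 0)

static enum flow_ctrl resolve_pause(uint32_t pause_result)
{
    switch (pause_result) {
    case 0xb:       /* LD: asym only,  LP: asym+pause -> we may send PAUSE   */
        return FC_TX;
    case 0xe:       /* LD: asym+pause, LP: asym only  -> we honour PAUSE     */
        return FC_RX;
    case 0x5:
    case 0x7:
    case 0xd:
    case 0xf:       /* both sides advertise symmetric PAUSE                  */
        return FC_BOTH;
    default:
        return FC_NONE;
    }
}

/* Usage example: build the nibble from advertisement bits, then resolve it. */
static enum flow_ctrl resolve_from_adv(int ld_asym, int ld_pause,
                                       int lp_asym, int lp_pause)
{
    uint32_t nibble = (ld_asym ? LD_ASYM : 0) | (ld_pause ? LD_PAUSE : 0) |
                      (lp_asym ? LP_ASYM : 0) | (lp_pause ? LP_PAUSE : 0);
    return resolve_pause(nibble);
}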
@@ -1531,6 +1531,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
+ if (vars->flow_ctrl == FLOW_CTRL_NONE &&
+ ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+ bnx2x_cl45_read(bp, port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ pause_result);
+ }
}
return ret;
}
@@ -1541,8 +1563,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
- u16 ld_pause; /* local driver */
- u16 lp_pause; /* link partner */
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
u16 pause_result;
vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1595,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
(bnx2x_ext_phy_resove_fc(params, vars))) {
return;
} else {
- vars->flow_ctrl = params->req_flow_ctrl;
- if (vars->flow_ctrl == FLOW_CTRL_AUTO) {
- if (params->mtu <= MAX_MTU_SIZE)
- vars->flow_ctrl = FLOW_CTRL_BOTH;
- else
- vars->flow_ctrl = FLOW_CTRL_TX;
- }
+ if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else
+ vars->flow_ctrl = params->req_flow_ctrl;
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
@@ -1590,6 +1609,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
+
u8 rc = 0;
vars->link_status = 0;
@@ -1690,7 +1710,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
vars->link_status |= LINK_STATUS_SERDES_LINK;
- if (params->req_line_speed == SPEED_AUTO_NEG) {
+ if ((params->req_line_speed == SPEED_AUTO_NEG) &&
+ ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+ (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
vars->autoneg = AUTO_NEG_ENABLED;
if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1729,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
if (vars->flow_ctrl & FLOW_CTRL_TX)
- vars->link_status |=
- LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
if (vars->flow_ctrl & FLOW_CTRL_RX)
- vars->link_status |=
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
} else { /* link_down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
- vars->line_speed = 0;
+
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = FLOW_CTRL_NONE;
vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1841,15 @@ static u8 bnx2x_emac_program(struct link_params *params,
}
/*****************************************************************************/
-/* External Phy section */
+/* External Phy section */
/*****************************************************************************/
-static void bnx2x_hw_reset(struct bnx2x *bp)
+static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1878,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
/* HW reset */
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
@@ -1869,7 +1894,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Unset Low Power Mode and SW reset */
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
DP(NETIF_MSG_LINK, "XGXS 8072\n");
bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1913,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
DP(NETIF_MSG_LINK, "XGXS 8073\n");
- bnx2x_cl45_write(bp,
- params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
}
break;
@@ -1908,10 +1929,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ params->port);
/* HW reset */
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
break;
@@ -1934,7 +1956,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
DP(NETIF_MSG_LINK, "SerDes 5482\n");
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, params->port);
break;
default:
@@ -2098,42 +2120,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
}
-static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
+static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
+ u8 ext_phy_addr)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = ((params->ext_phy_config &
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- u16 fw_ver1, fw_ver2, val;
- /* Need to wait 100ms after reset */
- msleep(100);
- /* Boot port from external ROM */
+ u16 fw_ver1, fw_ver2;
+ /* Boot port from external ROM */
/* EDC grst */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
/* ucode reboot and rst */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
0x008c);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2167,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
msleep(100);
/* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
- /* Only set bit 10 = 1 (Tx power down) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+}
+static void bnx2x_bcm807x_force_10G(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 ext_phy_addr = ((params->ext_phy_config &
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
+ /* Force KR or KX */
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10));
-
- msleep(600);
- /* Release bit 10 (Release Tx power down) */
+ MDIO_PMA_REG_CTRL,
+ 0x2040);
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
-
+ MDIO_PMA_REG_10G_CTRL2,
+ 0x000b);
+ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_BCM_CTRL,
+ 0x0000);
+ bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CTRL,
+ 0x0000);
}
-
static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -2236,32 +2278,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
}
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct link_vars *vars)
{
+
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u16 cl37_val;
u8 ext_phy_addr = ((params->ext_phy_config &
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* Force KR or KX */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x2040);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2,
- 0x000b);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x0000);
+ MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
}
static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2343,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
- if (vars->ieee_fc &
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
- if (vars->ieee_fc &
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
val |=
MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2366,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE, val);
}
+
+static void bnx2x_init_internal_phy(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ u16 bank, rx_eq;
+
+ rx_eq = ((params->serdes_config &
+ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
+ PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
+ for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
+ CL45_WR_OVER_CL22(bp, port,
+ params->phy_addr,
+ bank ,
+ MDIO_RX0_RX_EQ_BOOST,
+ ((rx_eq &
+ MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
+ MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
+ }
+
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+ /* disable autoneg */
+ bnx2x_set_autoneg(params, vars);
+
+ /* program speed and duplex */
+ bnx2x_program_serdes(params, vars);
+
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisment(params);
+
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisment(params,
+ vars->ieee_fc);
+
+ /* enable autoneg */
+ bnx2x_set_autoneg(params, vars);
+
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(params);
+ }
+
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
+
+ bnx2x_initialize_sgmii_process(params, vars);
+ }
+}
+
static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -2343,7 +2466,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2541,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FD,
+ MDIO_AN_REG_CL37_FC_LP,
0x0020);
/* Enable CL37 AN */
bnx2x_cl45_write(bp, params->port,
@@ -2458,54 +2580,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
rx_alarm_ctrl_val = 0x400;
lasi_ctrl_val = 0x0004;
} else {
- /* In 8073, port1 is directed through emac0 and
- * port0 is directed through emac1
- */
rx_alarm_ctrl_val = (1<<2);
- /*lasi_ctrl_val = 0x0005;*/
lasi_ctrl_val = 0x0004;
}
- /* Wait for soft reset to get cleared upto 1 sec */
- for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- &ctrl);
- if (!(ctrl & (1<<15)))
- break;
- msleep(1);
- }
- DP(NETIF_MSG_LINK,
- "807x control reg 0x%x (after %d ms)\n",
- ctrl, cnt);
+ /* enable LASI */
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM_CTRL,
+ rx_alarm_ctrl_val);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LASI_CTRL,
+ lasi_ctrl_val);
+
+ bnx2x_8073_set_pause_cl37(params, vars);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
bnx2x_bcm8072_external_rom_boot(params);
} else {
- bnx2x_bcm8073_external_rom_boot(params);
+
/* In case of 8073 with long xaui lines,
don't set the 8073 xaui low power*/
bnx2x_bcm8073_set_xaui_low_power_mode(params);
}
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ 0xca13,
+ &tmp1);
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2519,12 +2630,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
/* If this is forced speed, set to KR or KX
* (all other are not supported)
*/
- if (!(params->req_line_speed == SPEED_AUTO_NEG)) {
- if (params->req_line_speed == SPEED_10000) {
- bnx2x_bcm807x_force_10G(params);
- DP(NETIF_MSG_LINK,
- "Forced speed 10G on 807X\n");
- break;
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_bcm807x_force_10G(params);
+ DP(NETIF_MSG_LINK,
+ "Forced speed 10G on 807X\n");
+ break;
+ } else {
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type, ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_BCM_CTRL,
+ 0x0002);
+ }
+ if (params->req_line_speed != SPEED_AUTO_NEG) {
+ if (params->req_line_speed == SPEED_10000) {
+ val = (1<<7);
} else if (params->req_line_speed ==
SPEED_2500) {
val = (1<<5);
@@ -2539,11 +2659,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
+ /* Note that 2.5G works only when
+ used with 1G advertisment */
if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
val |= (1<<5);
- DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
- /*val = ((1<<5)|(1<<7));*/
+ DP(NETIF_MSG_LINK,
+ "807x autoneg val = 0x%x\n", val);
}
bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2677,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- /* Disable 2.5Ghz */
+
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
0x8329, &tmp1);
-/* SUPPORT_SPEED_CAPABILITY
- (Due to the nature of the link order, its not
- possible to enable 2.5G within the autoneg
- capabilities)
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
-*/
- if (params->req_line_speed == SPEED_2500) {
+
+ if (((params->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (params->req_line_speed ==
+ SPEED_AUTO_NEG)) ||
+ (params->req_line_speed ==
+ SPEED_2500)) {
u16 phy_ver;
/* Allow 2.5G for A1 and above */
bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2697,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
0xc801, &phy_ver);
-
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
if (phy_ver > 0)
tmp1 |= 1;
else
tmp1 &= 0xfffe;
- }
- else
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
tmp1 &= 0xfffe;
+ }
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
0x8329, tmp1);
}
- /* Add support for CL37 (passive mode) I */
- bnx2x_cl45_write(bp, params->port,
+
+ /* Add support for CL37 (passive mode) II */
+
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73, 0x040c);
- /* Add support for CL37 (passive mode) II */
+ MDIO_AN_REG_CL37_FC_LD,
+ &tmp1);
+
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FD, 0x20);
+ MDIO_AN_REG_CL37_FC_LD, (tmp1 |
+ ((params->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_AN_DEVAD,
MDIO_AN_REG_CL37_AN, 0x1000);
- /* Restart autoneg */
- msleep(500);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-
- /* The SNR will improve about 2db by changing the
+ /* The SNR will improve about 2db by changing
BW and FEE main tap. Rest commands are executed
after link is up*/
- /* Change FFE main cursor to 5 in EDC register */
+ /*Change FFE main cursor to 5 in EDC register*/
if (bnx2x_8073_is_snr_needed(params))
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
@@ -2626,25 +2752,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
MDIO_PMA_REG_EDC_FFE_MAIN,
0xFB0C);
- /* Enable FEC (Forware Error Correction)
- Request in the AN */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, &tmp1);
+ /* Enable FEC (Forward Error Correction)
+ Request in the AN */
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV2, &tmp1);
- tmp1 |= (1<<15);
+ tmp1 |= (1<<15);
+
+ bnx2x_cl45_write(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV2, tmp1);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, tmp1);
}
bnx2x_ext_phy_set_pause(params, vars);
+ /* Restart autoneg */
+ msleep(500);
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
@@ -2701,10 +2830,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
}
} else { /* SerDes */
-/* ext_phy_addr = ((bp->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
-*/
+
ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
switch (ext_phy_type) {
case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u32 ext_phy_type;
@@ -2767,6 +2893,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_REG_RX_SD, &rx_sd);
DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
ext_phy_link_up = (rx_sd & 0x1);
+ if (ext_phy_link_up)
+ vars->line_speed = SPEED_10000;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2938,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
*/
ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
(val2 & (1<<1)));
+ if (ext_phy_link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ }
+
/* clear LASI indication*/
bnx2x_cl45_read(bp, params->port, ext_phy_type,
ext_phy_addr,
@@ -2820,6 +2955,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
{
+ u16 link_status = 0;
+ u16 an1000_status = 0;
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2983,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LASI_STATUS, &val1);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val2);
DP(NETIF_MSG_LINK,
- "8703 LASI status 0x%x->0x%x\n",
- val1, val2);
+ "8703 LASI status 0x%x\n",
+ val1);
}
/* clear the interrupt LASI status register */
@@ -2869,20 +3001,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PCS_REG_STATUS, &val1);
DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
val2, val1);
- /* Check the LASI */
+ /* Clear MSG-OUT */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &val2);
+ 0xca13,
+ &val1);
+
+ /* Check the LASI */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM,
- &val1);
- DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
- val2, val1);
+ MDIO_PMA_REG_RX_ALARM, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
/* Check the link status */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2905,29 +3040,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
if (ext_phy_type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- u16 an1000_status = 0;
+
if (ext_phy_link_up &&
- (
- (params->req_line_speed != SPEED_10000)
- )) {
+ ((params->req_line_speed !=
+ SPEED_10000))) {
if (bnx2x_bcm8073_xaui_wa(params)
!= 0) {
ext_phy_link_up = 0;
break;
}
- bnx2x_cl45_read(bp, params->port,
+ }
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
- MDIO_XS_DEVAD,
+ MDIO_AN_DEVAD,
0x8304,
&an1000_status);
- bnx2x_cl45_read(bp, params->port,
+ bnx2x_cl45_read(bp, params->port,
ext_phy_type,
ext_phy_addr,
- MDIO_XS_DEVAD,
+ MDIO_AN_DEVAD,
0x8304,
&an1000_status);
- }
+
/* Check the link status on 1.1.2 */
bnx2x_cl45_read(bp, params->port,
ext_phy_type,
@@ -2943,8 +3078,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
"an_link_status=0x%x\n",
val2, val1, an1000_status);
- ext_phy_link_up = (((val1 & 4) == 4) ||
- (an1000_status & (1<<1)));
+ ext_phy_link_up = (((val1 & 4) == 4) ||
+ (an1000_status & (1<<1)));
if (ext_phy_link_up &&
bnx2x_8073_is_snr_needed(params)) {
/* The SNR will improve about 2dbby
@@ -2968,8 +3103,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_PMA_REG_CDR_BANDWIDTH,
0x0333);
+
+ }
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_PMA_DEVAD,
+ 0xc820,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected,
+ bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) &&
+ (!(link_status & (1<<15)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 10G\n", params->port);
+ } else if ((link_status & (1<<1)) &&
+ (!(link_status & (1<<14)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 2.5G\n", params->port);
+ } else if ((link_status & (1<<0)) &&
+ (!(link_status & (1<<13)))) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 1G\n", params->port);
+ } else {
+ ext_phy_link_up = 0;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " is down\n", params->port);
+ }
+ } else {
+ /* See if 1G link is up for the 8072 */
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ 0x8304,
+ &an1000_status);
+ bnx2x_cl45_read(bp, params->port,
+ ext_phy_type,
+ ext_phy_addr,
+ MDIO_AN_DEVAD,
+ 0x8304,
+ &an1000_status);
+ if (an1000_status & (1<<1)) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 1G\n", params->port);
+ } else if (ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK,
+ "port %x: External link"
+ " up in 10G\n", params->port);
}
}
+
+
break;
}
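The 0xc820 read above packs both the detected speed and a per-speed "down" indication into one word. Below is a minimal, stand-alone C sketch of that decode; it mirrors only the bit positions named in the comment in this hunk (register address and bit meanings are taken from the patch, not from an official datasheet):

#include <stdint.h>

#define SPEED_1000   1000
#define SPEED_2500   2500
#define SPEED_10000 10000

/* Returns 1 and fills *speed when the link is up, 0 otherwise.
 * Bits 0..2: speed detected (1G/2.5G/10G); bits 13..15: the matching
 * "link is down" indication, checked with the opposite polarity. */
static int bcm8073_decode_link(uint16_t link_status, int *speed)
{
	if ((link_status & (1 << 2)) && !(link_status & (1 << 15))) {
		*speed = SPEED_10000;
		return 1;
	}
	if ((link_status & (1 << 1)) && !(link_status & (1 << 14))) {
		*speed = SPEED_2500;
		return 1;
	}
	if ((link_status & (1 << 0)) && !(link_status & (1 << 13))) {
		*speed = SPEED_1000;
		return 1;
	}
	return 0;
}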
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3207,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
MDIO_AN_DEVAD,
MDIO_AN_REG_MASTER_STATUS,
&val2);
+ vars->line_speed = SPEED_10000;
DP(NETIF_MSG_LINK,
"SFX7101 AN status 0x%x->Master=%x\n",
val2,
@@ -3100,7 +3302,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
* link management
*/
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u16 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -3181,7 +3383,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
}
-static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
+static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
+ u32 ext_phy_type)
{
u32 cnt = 0;
u16 ctrl = 0;
@@ -3192,12 +3395,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
/* take ext phy out of reset */
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
/* wait for 5ms */
msleep(5);
@@ -3205,7 +3410,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
for (cnt = 0; cnt < 1000; cnt++) {
msleep(1);
bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
@@ -3217,13 +3422,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
}
}
-static void bnx2x_turn_off_sf(struct bnx2x *bp)
+static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
{
/* put sf to reset */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
bnx2x_set_gpio(bp,
- MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_LOW);
+ MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_LOW,
+ port);
+ bnx2x_set_gpio(bp,
+ MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_LOW,
+ port);
}
u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3462,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
/* Take ext phy out of reset */
if (!driver_loaded)
- bnx2x_turn_on_sf(bp, params->port, ext_phy_addr);
+ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+ ext_phy_type);
/* wait for 1ms */
msleep(1);
@@ -3276,11 +3486,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
version[4] = '\0';
if (!driver_loaded)
- bnx2x_turn_off_sf(bp);
+ bnx2x_turn_off_sf(bp, params->port);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
{
+ /* Take ext phy out of reset */
+ if (!driver_loaded)
+ bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
+ ext_phy_type);
+
bnx2x_cl45_read(bp, params->port, ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
@@ -3333,7 +3548,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
struct bnx2x *bp = params->bp;
if (is_10g) {
- u32 md_devad;
+ u32 md_devad;
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
@@ -3553,6 +3768,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
u16 hw_led_mode, u32 chip_id)
{
u8 rc = 0;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
speed, hw_led_mode);
@@ -3561,6 +3778,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
@@ -3572,6 +3792,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
if (!CHIP_IS_E1H(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
@@ -3622,7 +3846,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
struct bnx2x *bp = params->bp;
u8 port = params->port;
u8 rc = 0;
-
+ u8 non_ext_phy;
+ u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
/* Activate the external PHY */
bnx2x_ext_phy_reset(params, vars);
@@ -3644,10 +3869,6 @@ static u8 bnx2x_link_initialize(struct link_params *params,
bnx2x_set_swap_lanes(params);
}
- /* Set Parallel Detect */
- if (params->req_line_speed == SPEED_AUTO_NEG)
- bnx2x_set_parallel_detection(params, vars->phy_flags);
-
if (vars->phy_flags & PHY_XGXS_FLAG) {
if (params->req_line_speed &&
((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3878,33 @@ static u8 bnx2x_link_initialize(struct link_params *params,
vars->phy_flags &= ~PHY_SGMII_FLAG;
}
}
+ /* In case an external phy is present, the line speed is the line
+ speed negotiated by the external phy. In the direct-only case, the
+ line_speed during initialization equals the req_line_speed */
+ vars->line_speed = params->req_line_speed;
- if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
- u16 bank, rx_eq;
-
- rx_eq = ((params->serdes_config &
- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
- PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
+ bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
- DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
- for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
- bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
- CL45_WR_OVER_CL22(bp, port,
- params->phy_addr,
- bank ,
- MDIO_RX0_RX_EQ_BOOST,
- ((rx_eq &
- MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
- MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
- }
-
- /* forced speed requested? */
- if (params->req_line_speed != SPEED_AUTO_NEG) {
- DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
- /* disable autoneg */
- bnx2x_set_autoneg(params, vars);
-
- /* program speed and duplex */
- bnx2x_program_serdes(params);
- vars->ieee_fc =
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
-
- } else { /* AN_mode */
- DP(NETIF_MSG_LINK, "not SGMII, AN\n");
-
- /* AN enabled */
- bnx2x_set_brcm_cl37_advertisment(params);
-
- /* program duplex & pause advertisement (for aneg) */
- bnx2x_set_ieee_aneg_advertisment(params,
- &vars->ieee_fc);
-
- /* enable autoneg */
- bnx2x_set_autoneg(params, vars);
-
- /* enable and restart AN */
- bnx2x_restart_autoneg(params);
- }
-
- } else { /* SGMII mode */
- DP(NETIF_MSG_LINK, "SGMII\n");
-
- bnx2x_initialize_sgmii_process(params);
+ /* init ext phy and enable link state int */
+ non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
+ (params->loopback_mode == LOOPBACK_XGXS_10) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY));
+
+ if (non_ext_phy ||
+ (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
+ if (params->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_set_parallel_detection(params, vars->phy_flags);
+ bnx2x_init_internal_phy(params, vars);
}
- /* init ext phy and enable link state int */
- rc |= bnx2x_ext_phy_init(params, vars);
+ if (!non_ext_phy)
+ rc |= bnx2x_ext_phy_init(params, vars);
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
return rc;
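A condensed form of the init-path decision introduced above, assuming only what the hunk shows: with a direct (no external) phy, or in the XGXS-10G / external-phy loopback modes, the internal phy is initialized right away, otherwise external-phy init runs instead. The enums are illustrative stand-ins for the driver's PORT_HW_CFG_* / LOOPBACK_* defines:

enum demo_ext_phy  { DEMO_EXT_PHY_DIRECT, DEMO_EXT_PHY_BCM8705, DEMO_EXT_PHY_BCM8073 };
enum demo_loopback { DEMO_LB_NONE, DEMO_LB_XGXS_10, DEMO_LB_EXT_PHY };

/* Mirrors the non_ext_phy test: true when no external phy needs init. */
static int demo_non_ext_phy(enum demo_ext_phy type, enum demo_loopback lb)
{
	return (type == DEMO_EXT_PHY_DIRECT) ||
	       (lb == DEMO_LB_XGXS_10) ||
	       (lb == DEMO_LB_EXT_PHY);
}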
@@ -3730,15 +3916,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
struct bnx2x *bp = params->bp;
u32 val;
- DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "Phy Initialization started \n");
DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
params->req_line_speed, params->req_flow_ctrl);
vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+
if (params->switch_cfg == SWITCH_CFG_1G)
vars->phy_flags = PHY_SERDES_FLAG;
else
vars->phy_flags = PHY_XGXS_FLAG;
+
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4088,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
bnx2x_link_initialize(params, vars);
+ msleep(30);
bnx2x_link_int_enable(params);
}
return 0;
@@ -3943,39 +4138,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
/* HW reset */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
- } else {
-
- u8 ext_phy_addr = ((ext_phy_config &
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
-
- /* SW reset */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
-
- /* Set Low Power Mode */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<11);
-
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
+ } else if (ext_phy_type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
"low power mode\n",
port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
- }
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
}
/* reset the SerDes/XGXS */
@@ -3995,6 +4173,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
return 0;
}
+static u8 bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(bp, port, LED_MODE_OFF,
+ 0, params->hw_led_mode,
+ params->chip_id);
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status = 0;
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* reset BigMac */
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ return 0;
+}
+
+static u8 bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g, u32 gp_status)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 rc = 0;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ if (link_10g) {
+ bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_set_led(bp, port, LED_MODE_OPER,
+ SPEED_10000, params->hw_led_mode,
+ params->chip_id);
+
+ } else {
+ bnx2x_emac_enable(params, vars, 0);
+ rc = bnx2x_emac_program(params, vars->line_speed,
+ vars->duplex);
+
+ /* AN complete? */
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
+ if (!(vars->phy_flags &
+ PHY_SGMII_FLAG))
+ bnx2x_set_sgmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ return rc;
+}
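A hypothetical, stand-alone condensation of what the two helpers above split out of the interrupt path: link_up is the AND of the internal and external link states, the up path picks the 10G MAC (BMAC) or the 1G/2.5G MAC (EMAC), and the down path drains and resets. The struct and stub functions are stand-ins, not the driver's real types:

struct demo_link {
	int phy_link_up;	/* internal XGXS/SerDes link */
	int ext_phy_link_up;	/* external phy link */
	int link_up;
};

static int demo_link_down(void)       { /* LED off, drain NIG, reset BMAC */ return 0; }
static int demo_link_up(int link_10g) { /* enable BMAC (10G) or EMAC */ (void)link_10g; return 0; }

static int demo_link_update(struct demo_link *l, int link_10g)
{
	l->link_up = l->phy_link_up && l->ext_phy_link_up;
	return l->link_up ? demo_link_up(link_10g) : demo_link_down();
}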
/* This function should called upon link interrupt */
/* In case vars->link_up, driver needs to
1. Update the pbf
@@ -4012,10 +4257,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- u16 i;
u16 gp_status;
- u16 link_10g;
- u8 rc = 0;
+ u8 link_10g;
+ u8 ext_phy_link_up, rc = 0;
+ u32 ext_phy_type;
DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
port,
@@ -4031,15 +4276,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+ ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* avoid fast toggling */
- for (i = 0; i < 10; i++) {
- msleep(10);
- CL45_RD_OVER_CL22(bp, port, params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
- }
+ /* Check external link change only for non-direct */
+ ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
+
+ /* Read gp_status */
+ CL45_RD_OVER_CL22(bp, port, params->phy_addr,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
rc = bnx2x_link_settings_status(params, vars, gp_status);
if (rc != 0)
@@ -4055,73 +4301,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
+ /* In case the external phy link is up and the internal link is down
+ (e.g. not initialized yet after link initialization), the internal
+ phy needs to be initialized.
+ Note that after a link down-up caused by a cable plug, the xgxs
+ link will probably come up again without the need to
+ initialize it */
+
+ if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
+ (ext_phy_link_up && !vars->phy_link_up))
+ bnx2x_init_internal_phy(params, vars);
+
/* link is up only if both local phy and external phy are up */
- vars->link_up = (vars->phy_link_up &&
- bnx2x_ext_phy_is_link_up(params, vars));
+ vars->link_up = (ext_phy_link_up && vars->phy_link_up);
- if (!vars->phy_link_up &&
- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) {
- bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
+
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+ u8 ext_phy_addr[PORT_MAX];
+ u16 val;
+ s8 port;
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Extract the ext phy address for the port */
+ u32 ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
+
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ ext_phy_addr[port] =
+ ((ext_phy_config &
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
+
+ /* Need to take the phy out of low power mode in order
+ to access its registers */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
- if (vars->link_up) {
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (link_10g) {
- bnx2x_bmac_enable(params, vars, 0);
- bnx2x_set_led(bp, port, LED_MODE_OPER,
- SPEED_10000, params->hw_led_mode,
- params->chip_id);
+ /* Add delay of 150ms after reset */
+ msleep(150);
- } else {
- bnx2x_emac_enable(params, vars, 0);
- rc = bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u16 fw_ver1;
- /* AN complete? */
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- if (!(vars->phy_flags &
- PHY_SGMII_FLAG))
- bnx2x_set_sgmii_tx_driver(params);
- }
+ bnx2x_bcm8073_external_rom_boot(bp, port,
+ ext_phy_addr[port]);
+
+ bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ if (fw_ver1 == 0) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_common_init_phy port %x "
+ "fw Download failed\n", port);
+ return -EINVAL;
}
- /* PBF - link up */
- rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
+ /* Only set bit 10 = 1 (Tx power down) */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+ /* Phase1 of TX_POWER_DOWN reset */
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
+ }
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
+ /* Toggle Transmitter: Power down and then up with 600ms
+ delay between */
+ msleep(600);
- } else { /* link down */
- DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port);
- bnx2x_set_led(bp, port, LED_MODE_OFF,
- 0, params->hw_led_mode,
- params->chip_id);
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET*/
+ /* Release bit 10 (Release Tx power down) */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ msleep(15);
- /* update shared memory */
- vars->link_status = 0;
- bnx2x_update_mng(params, vars->link_status);
+ /* Read modify write the SPI-ROM version select register */
+ bnx2x_cl45_read(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ bnx2x_cl45_write(bp, port,
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ ext_phy_addr[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
- /* activate nig drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+ /* set GPIO2 back to LOW */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return 0;
- /* reset BigMac */
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+}
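One detail worth calling out in the loops above: the port index is a signed s8. A tiny stand-alone sketch of why, assuming only the loop bounds shown (PORT_0 = 0, PORT_MAX = 2, i.e. two ports):

#define PORT_0   0
#define PORT_MAX 2

/* With an unsigned index, "port >= PORT_0" is always true and the
 * decrement from 0 wraps around, so the loop never terminates; the
 * signed index makes the termination condition meaningful. */
static void demo_for_each_port_reversed(void (*fn)(int port))
{
	int port;

	for (port = PORT_MAX - 1; port >= PORT_0; port--)
		fn(port);
}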
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+ u8 rc = 0;
+ u32 ext_phy_type;
+
+ DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
+
+ /* Read the ext_phy_type for arbitrary port(0) */
+ ext_phy_type = XGXS_EXT_PHY_TYPE(
+ REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].external_phy_config)));
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ {
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK,
+ "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
+ ext_phy_type);
+ break;
}
return rc;
}
+
+
static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
{
u16 val, cnt;
@@ -4154,7 +4504,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
}
#define RESERVED_SIZE 256
/* max application is 160K bytes - data at end of RAM */
-#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE
+#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
/* Header is 14 bytes */
#define HEADER_SIZE 14
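The added parentheses matter because the macro body is substituted textually. A quick illustrative comparison (the _BAD/_GOOD names exist only for this demonstration):

#include <stdio.h>

#define RESERVED_SIZE      256
#define MAX_APP_SIZE_BAD   160*1024 - RESERVED_SIZE     /* old form */
#define MAX_APP_SIZE_GOOD  (160*1024 - RESERVED_SIZE)   /* fixed form */

int main(void)
{
	/* Any multiplicative or unary context exposes the difference:
	 * 2*160*1024 - 256 = 327424 vs 2*(160*1024 - 256) = 327168 */
	printf("%d %d\n", 2 * MAX_APP_SIZE_BAD, 2 * MAX_APP_SIZE_GOOD);
	return 0;
}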
@@ -4192,12 +4542,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
size = MAX_APP_SIZE+HEADER_SIZE;
}
DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
- DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
+ DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
/* Put the DSP in download mode by setting FLASH_CFG[2] to 1
and issuing a reset.*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH);
+ MISC_REGISTERS_GPIO_HIGH, port);
bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
@@ -4429,7 +4779,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
}
/* DSP Remove Download Mode */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_LOW, port);
bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
@@ -4437,7 +4788,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
for (cnt = 0; cnt < 100; cnt++)
msleep(5);
- bnx2x_hw_reset(bp);
+ bnx2x_hw_reset(bp, port);
for (cnt = 0; cnt < 100; cnt++)
msleep(5);
@@ -4473,7 +4824,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
MDIO_PMA_REG_7101_VER2,
&image_revision2);
- if (data[0x14e] != (image_revision2&0xFF) ||
+ if (data[0x14e] != (image_revision2&0xFF) ||
data[0x14f] != ((image_revision2&0xFF00)>>8) ||
data[0x150] != (image_revision1&0xFF) ||
data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4859,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
/* Take ext phy out of reset */
if (!driver_loaded)
- bnx2x_turn_on_sf(bp, port, ext_phy_addr);
+ bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
data, size);
if (!driver_loaded)
- bnx2x_turn_off_sf(bp);
+ bnx2x_turn_off_sf(bp, port);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37a..86d54a1 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@ struct link_params {
#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS_10 3
#define LOOPBACK_EXT_PHY 4
+#define LOOPBACK_EXT 5
u16 req_duplex;
u16 req_flow_ctrl;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
u16 req_line_speed; /* Also determine AutoNeg */
/* Device parameters */
u8 mac_addr[6];
- u16 mtu;
+
/* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
u8 phy_addr, u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
- and update the link vars accordinaly */
+ and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
- to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to
+ to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
otherwise link is down*/
u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
+/* One-time initialization for external phy after power up */
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd..82deea0 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -44,7 +44,6 @@
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
-#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
@@ -60,8 +59,8 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
-#define DRV_MODULE_VERSION "1.45.6"
-#define DRV_MODULE_RELDATE "2008/06/23"
+#define DRV_MODULE_VERSION "1.45.20"
+#define DRV_MODULE_RELDATE "2008/08/25"
#define BNX2X_BC_VER 0x040200
/* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +75,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
-static int disable_tpa;
-static int nomcp;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;
+module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
-module_param(disable_tpa, int, 0);
-module_param(nomcp, int, 0);
+MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
-MODULE_PARM_DESC(nomcp, "ignore management CPU");
#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
@@ -237,17 +234,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
while (*wb_comp != DMAE_COMP_VAL) {
DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
-
if (!cnt) {
BNX2X_ERR("dmae timeout!\n");
break;
}
cnt--;
+ /* adjust delay for emulation/FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(100);
+ else
+ udelay(5);
}
mutex_unlock(&bp->dmae_mutex);
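The reordering above moves the budget check ahead of the delay, so a timed-out wait no longer sleeps one extra interval before reporting. A minimal generic sketch of that shape (done() and wait_one_tick() are stand-ins for the driver's completion test and its msleep/udelay choice):

#include <stdbool.h>

static bool demo_poll(bool (*done)(void), void (*wait_one_tick)(void), int budget)
{
	while (!done()) {
		if (!budget)
			return false;	/* report the timeout immediately */
		budget--;
		wait_one_tick();	/* only sleep when another try remains */
	}
	return true;
}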
@@ -310,17 +306,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
while (*wb_comp != DMAE_COMP_VAL) {
- /* adjust delay for emulation/FPGA */
- if (CHIP_REV_IS_SLOW(bp))
- msleep(100);
- else
- udelay(5);
-
if (!cnt) {
BNX2X_ERR("dmae timeout!\n");
break;
}
cnt--;
+ /* adjust delay for emulation/FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(100);
+ else
+ udelay(5);
}
DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +498,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
int i;
u16 j, start, end;
+ bp->stats_state = STATS_STATE_DISABLED;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
BNX2X_ERR("begin crash dump -----------------\n");
for_each_queue(bp, i) {
@@ -513,17 +511,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
" tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
- " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
- " rx_sge_prod(%x) last_max_sge(%x)\n",
- fp->rx_comp_prod, fp->rx_comp_cons,
- le16_to_cpu(*fp->rx_cons_sb),
- le16_to_cpu(*fp->rx_bd_cons_sb),
- fp->rx_sge_prod, fp->last_max_sge);
- BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
- " bd data(%x,%x) rx_alloc_failed(%lx)\n",
- fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
- hw_prods->bds_prod, fp->rx_alloc_failed);
+ BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
+ " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
+ " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ fp->rx_bd_prod, fp->rx_bd_cons,
+ le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
+ " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
+ " *sb_u_idx(%x) bd data(%x,%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
+ fp->status_blk->c_status_block.status_block_index,
+ fp->fp_u_idx,
+ fp->status_blk->u_status_block.status_block_index,
+ hw_prods->packets_prod, hw_prods->bds_prod);
start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +554,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
j, rx_bd[1], rx_bd[0], sw_bd->skb);
}
- start = 0;
- end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
for (j = start; j < end; j++) {
u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +583,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
bnx2x_fw_dump(bp);
bnx2x_mc_assert(bp);
BNX2X_ERR("end crash dump -----------------\n");
-
- bp->stats_state = STATS_STATE_DISABLED;
- DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +682,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
- u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
igu_ack.status_block_index = index;
@@ -694,9 +693,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
- DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
- (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
+ DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+ (*(u32 *)&igu_ack), hc_addr);
+ REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +715,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
return rc;
}
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
- u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
- if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
- rx_cons_sb++;
-
- if ((fp->rx_comp_cons != rx_cons_sb) ||
- (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
- (fp->tx_pkt_prod != fp->tx_pkt_cons))
- return 1;
-
- return 0;
-}
-
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
- u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
- u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_SIMD_MASK);
+ u32 result = REG_RD(bp, hc_addr);
- DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
- result, BAR_IGU_INTMEM + igu_addr);
+ DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+ result, hc_addr);
-#ifdef IGU_DEBUG
-#warning IGU_DEBUG active
- if (result == 0) {
- BNX2X_ERR("read %x from IGU\n", result);
- REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
- }
-#endif
return result;
}
@@ -898,6 +876,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
netif_tx_lock(bp->dev);
if (netif_queue_stopped(bp->dev) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
(bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
netif_wake_queue(bp->dev);
@@ -905,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
}
}
+
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
union eth_rx_cqe *rr_cqe)
{
@@ -960,6 +940,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
break;
+
case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1150,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
memset(fp->sge_mask, 0xff,
(NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
- /* Clear the two last indeces in the page to 1:
- these are the indeces that correspond to the "next" element,
+ /* Clear the two last indices in the page to 1:
+ these are the indices that correspond to the "next" element,
hence will never be indicated and should be removed from
the calculations. */
bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1242,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
return err;
}
@@ -1297,14 +1278,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
- /* if alloc failed drop the packet and keep the buffer in the bin */
if (likely(new_skb)) {
+ /* fix ip xsum and give it to the stack */
+ /* (no need to map the new skb) */
prefetch(skb);
prefetch(((char *)(skb)) + 128);
- /* else fix ip xsum and give it to the stack */
- /* (no need to map the new skb) */
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > bp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1333,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
fp->tpa_pool[queue].skb = new_skb;
} else {
+ /* else drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new skb - dropping packet!\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
}
fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1371,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
int rx_pkt = 0;
- u16 queue;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -1456,7 +1436,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if ((!fp->disable_tpa) &&
(TPA_TYPE(cqe_fp_flags) !=
(TPA_TYPE_START | TPA_TYPE_END))) {
- queue = cqe->fast_path_cqe.queue_index;
+ u16 queue = cqe->fast_path_cqe.queue_index;
if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1483,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- /* do we sometimes forward error packets anyway? */
DP(NETIF_MSG_RX_ERR,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- /* TBD make sure MC counts this as a drop */
+ bp->eth_stats.rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -1524,7 +1503,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped "
"because of alloc failure\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
@@ -1550,7 +1529,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
goto next_rx;
@@ -1559,10 +1538,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_NONE;
- if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* TBD do we pass bad csum packets in promisc */
+ if (bp->rx_csum) {
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ bp->eth_stats.hw_csum_err++;
+ }
}
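A small stand-alone restatement of the receive-checksum policy added above: hardware validation is only trusted when rx_csum is enabled, and a failed validation is now counted rather than silently ignored. The stats struct below is a stand-in for the driver's eth_stats:

#include <stdint.h>

enum demo_csum { DEMO_CHECKSUM_NONE, DEMO_CHECKSUM_UNNECESSARY };

struct demo_eth_stats { uint64_t hw_csum_err; };

static enum demo_csum demo_rx_csum(int rx_csum_enabled, int hw_csum_ok,
				   struct demo_eth_stats *stats)
{
	if (!rx_csum_enabled)
		return DEMO_CHECKSUM_NONE;
	if (hw_csum_ok)
		return DEMO_CHECKSUM_UNNECESSARY;
	stats->hw_csum_err++;		/* bad hardware checksum: count it */
	return DEMO_CHECKSUM_NONE;	/* let the stack re-verify in software */
}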
#ifdef BCM_VLAN
@@ -1615,6 +1596,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
struct net_device *dev = bp->dev;
int index = FP_IDX(fp);
+ /* Return here if interrupt is disabled */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+ return IRQ_HANDLED;
+ }
+
DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
index, FP_SB_ID(fp));
bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1635,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
}
DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
-#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
- return IRQ_HANDLED;
-#endif
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return IRQ_HANDLED;
}
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
mask = 0x2 << bp->fp[0].sb_id;
if (status & mask) {
struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1686,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
* General service functions
*/
-static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
+static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
- u8 port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
int cnt;
/* Validating that the resource is within range */
@@ -1714,20 +1702,26 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
return -EINVAL;
}
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
/* Validating that the resource is not already taken */
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit) {
DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
return -EEXIST;
}
- /* Try for 1 second every 5ms */
- for (cnt = 0; cnt < 200; cnt++) {
+ /* Try for 5 second every 5ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
/* Try to acquire the lock */
- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
- resource_bit);
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit)
return 0;
@@ -1737,11 +1731,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
return -EAGAIN;
}
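The lock register is now selected per function rather than per port. A sketch of just that address arithmetic, with placeholder base values standing in for the MISC_REG_DRIVER_CONTROL_* definitions (the real offsets live in the register header, not here):

#include <stdint.h>

#define DEMO_DRIVER_CONTROL_1 0x1000	/* placeholder, covers functions 0..5 */
#define DEMO_DRIVER_CONTROL_7 0x2000	/* placeholder, covers functions 6..7 */

static uint32_t demo_hw_lock_control_reg(int func)
{
	if (func <= 5)
		return DEMO_DRIVER_CONTROL_1 + func * 8;	/* 8 bytes per entry */
	return DEMO_DRIVER_CONTROL_7 + (func - 6) * 8;
}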
-static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
+static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
u32 resource_bit = (1 << resource);
- u8 port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1746,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
return -EINVAL;
}
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
/* Validating that the resource is currently taken */
- lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
return -EFAULT;
}
- REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
return 0;
}
/* HW Lock for shared dual port PHYs */
-static void bnx2x_phy_hw_lock(struct bnx2x *bp)
+static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
@@ -1772,25 +1774,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
(ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}
-static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
+static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
(ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
mutex_unlock(&bp->port.phy_mutex);
}
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
/* The GPIO should be swapped if swap register is set and active */
int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
- REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
int gpio_shift = gpio_num +
(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1803,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
return -EINVAL;
}
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* read GPIO and mask except the float bits */
gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
@@ -1822,7 +1824,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
break;
- case MISC_REGISTERS_GPIO_INPUT_HI_Z :
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
gpio_num, gpio_shift);
/* set FLOAT */
@@ -1834,7 +1836,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
}
REG_WR(bp, MISC_REG_GPIO, gpio_reg);
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
return 0;
}
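The GPIO helper now takes the port explicitly; the effective port is still flipped when both the port-swap strap and its override are set, and the shift selects the per-port GPIO bank. A sketch of that addressing, with the shift value as a placeholder for the driver's MISC_REGISTERS_GPIO_PORT_SHIFT define:

#define DEMO_GPIO_PORT_SHIFT 4	/* placeholder value */

static int demo_gpio_shift(int gpio_num, int port,
			   int port_swap, int strap_override)
{
	int gpio_port = (port_swap && strap_override) ^ port;

	return gpio_num + (gpio_port ? DEMO_GPIO_PORT_SHIFT : 0);
}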
@@ -1850,19 +1852,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return -EINVAL;
}
- bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* read SPIO and mask except the float bits */
spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
switch (mode) {
- case MISC_REGISTERS_SPIO_OUTPUT_LOW :
+ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
/* clear FLOAT and set CLR */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
break;
- case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
+ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
/* clear FLOAT and set SET */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1882,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
}
REG_WR(bp, MISC_REG_SPIO, spio_reg);
- bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
return 0;
}
@@ -1940,46 +1942,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
- u8 rc;
+ if (!BP_NOMCP(bp)) {
+ u8 rc;
- /* Initialize link parameters structure variables */
- bp->link_params.mtu = bp->dev->mtu;
+ /* Initialize link parameters structure variables */
+ /* It is recommended to turn off RX FC for jumbo frames
+ for better performance */
+ if (IS_E1HMF(bp))
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
+ else if (bp->dev->mtu > 5000)
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
- bnx2x_phy_hw_lock(bp);
- rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
- if (bp->link_vars.link_up)
- bnx2x_link_report(bp);
+ if (bp->link_vars.link_up)
+ bnx2x_link_report(bp);
- bnx2x_calc_fc_adv(bp);
+ bnx2x_calc_fc_adv(bp);
- return rc;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing -not initializing link\n");
+ return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
- bnx2x_phy_hw_lock(bp);
- bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
- bnx2x_calc_fc_adv(bp);
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing -not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
- bnx2x_phy_hw_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing -not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
u8 rc;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
return rc;
}
@@ -1991,7 +2010,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
sum of vn_min_rates
or
0 - if all the min_rates are 0.
- In the later case fainess algorithm should be deactivated.
+ In the latter case the fairness algorithm should be deactivated.
If not all min_rates are zero then those that are zeroes will
be set to 1.
*/
@@ -2114,7 +2133,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
/* If FAIRNESS is enabled (not all min rates are zeroes) and
if current min rate is zero - set it to 1.
- This is a requirment of the algorithm. */
+ This is a requirement of the algorithm. */
if ((vn_min_rate == 0) && wsum)
vn_min_rate = DEF_MIN_RATE;
vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2222,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
if (bp->link_vars.link_up) {
@@ -2357,7 +2376,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
}
/* acquire split MCP access lock register */
-static int bnx2x_lock_alr(struct bnx2x *bp)
+static int bnx2x_acquire_alr(struct bnx2x *bp)
{
u32 i, j, val;
int rc = 0;
@@ -2374,15 +2393,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
msleep(5);
}
if (!(val & (1L << 31))) {
- BNX2X_ERR("Cannot acquire nvram interface\n");
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
rc = -EBUSY;
}
return rc;
}
-/* Release split MCP access lock register */
-static void bnx2x_unlock_alr(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
{
u32 val = 0;
@@ -2395,7 +2414,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
u16 rc = 0;
barrier(); /* status block is written to by the chip */
-
if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
rc |= 1;
@@ -2426,26 +2444,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
- u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
+ u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
MISC_REG_AEU_MASK_ATTN_FUNC_0;
u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
- if (~bp->aeu_mask & (asserted & 0xff))
- BNX2X_ERR("IGU ERROR\n");
if (bp->attn_state & asserted)
BNX2X_ERR("IGU ERROR\n");
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
- bp->aeu_mask, asserted);
- bp->aeu_mask &= ~(asserted & 0xff);
- DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
+ aeu_mask, asserted);
+ aeu_mask &= ~(asserted & 0xff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
- REG_WR(bp, aeu_addr, bp->aeu_mask);
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2523,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
} /* if hardwired */
- DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
- asserted, BAR_IGU_INTMEM + igu_addr);
- REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+ asserted, hc_addr);
+ REG_WR(bp, hc_addr, asserted);
/* now set back the mask */
if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2527,15 +2550,16 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
BNX2X_ERR("SPIO5 hw attention\n");
switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* Fan failure attention */
- /* The PHY reset is controled by GPIO 1 */
+ /* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
- /* Low power mode is controled by GPIO 2 */
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ /* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
/* mark the failure */
bp->link_params.ext_phy_config &=
~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
int index;
u32 reg_addr;
u32 val;
+ u32 aeu_mask;
/* need to take HW lock because MCP or other port might also
try to handle this event */
- bnx2x_lock_alr(bp);
+ bnx2x_acquire_alr(bp);
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
HW_PRTY_ASSERT_SET_1) ||
(attn.sig[2] & group_mask.sig[2] &
HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ BNX2X_ERR("FATAL HW block parity attention\n");
}
}
- bnx2x_unlock_alr(bp);
+ bnx2x_release_alr(bp);
- reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
+ reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
val = ~deasserted;
-/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
- val, BAR_IGU_INTMEM + reg_addr); */
- REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+ val, reg_addr);
+ REG_WR(bp, reg_addr, val);
- if (bp->aeu_mask & (deasserted & 0xff))
- BNX2X_ERR("IGU BUG!\n");
if (~bp->attn_state & deasserted)
- BNX2X_ERR("IGU BUG!\n");
+ BNX2X_ERR("IGU ERROR\n");
reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
MISC_REG_AEU_MASK_ATTN_FUNC_0;
- DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
- bp->aeu_mask |= (deasserted & 0xff);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
- DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
- REG_WR(bp, reg_addr, bp->aeu_mask);
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
+ aeu_mask |= (deasserted & 0xff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return;
}
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
/* HW attentions */
if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
+ DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
return IRQ_HANDLED;
}
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
- /* we can 'loan' 1 */ \
+ /* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
- /* m_hi <= s_hi */ \
+ /* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
d_hi = 0; \
d_lo = 0; \
} else { \
- /* m_hi >= s_hi */ \
+ /* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
* Init service functions
*/
-static void bnx2x_storm_stats_init(struct bnx2x *bp)
-{
- int func = BP_FUNC(bp);
-
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
- REG_WR(bp, BAR_CSTRORM_INTMEM +
- CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
-
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
- U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
- REG_WR(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
- U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-}
-
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
bp->port.old_nig_stats.brb_discard =
REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ bp->port.old_nig_stats.brb_truncate =
+ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
&(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
might_sleep();
while (*stats_comp != DMAE_COMP_VAL) {
- msleep(1);
if (!cnt) {
BNX2X_ERR("timeout waiting for stats finished\n");
break;
}
cnt--;
+ msleep(1);
}
return 1;
}
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
- UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
UPDATE_STAT64_NIG(egress_mac_pkt0,
etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->rx_length_errors =
estats->rx_stat_etherstatsundersizepkts_lo +
estats->jabber_packets_received;
- nstats->rx_over_errors = estats->brb_drop_lo +
- estats->brb_truncate_discard;
+ nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bp->fp->rx_comp_cons),
le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
- netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
+ netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
estats->driver_xoff, estats->brb_drop_lo);
printk(KERN_DEBUG "tstats: checksum_discard %u "
"packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
- sizeof(struct ustorm_def_status_block)/4);
+ sizeof(struct ustorm_status_block)/4);
bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
- sizeof(struct cstorm_def_status_block)/4);
+ sizeof(struct cstorm_status_block)/4);
}
-static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
- struct host_status_block *sb, dma_addr_t mapping)
+static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+ dma_addr_t mapping, int sb_id)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
atten_status_block);
def_sb->atten_status_block.status_block_id = sb_id;
- bp->def_att_idx = 0;
bp->attn_state = 0;
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
reg_offset + 0xc + 0x10*index);
}
- bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
- MISC_REG_AEU_MASK_ATTN_FUNC_0));
-
reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
HC_REG_ATTN_MSG0_ADDR_L);
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
u_def_status_block);
def_sb->u_def_status_block.status_block_id = sb_id;
- bp->def_u_idx = 0;
-
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_USTRORM_INTMEM +
((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
+ REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
c_def_status_block);
def_sb->c_def_status_block.status_block_id = sb_id;
- bp->def_c_idx = 0;
-
REG_WR(bp, BAR_CSTRORM_INTMEM +
CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_CSTRORM_INTMEM +
((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
t_def_status_block);
def_sb->t_def_status_block.status_block_id = sb_id;
- bp->def_t_idx = 0;
-
REG_WR(bp, BAR_TSTRORM_INTMEM +
TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_TSTRORM_INTMEM +
((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
x_def_status_block);
def_sb->x_def_status_block.status_block_id = sb_id;
- bp->def_x_idx = 0;
-
REG_WR(bp, BAR_XSTRORM_INTMEM +
XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
REG_WR(bp, BAR_XSTRORM_INTMEM +
((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
U64_HI(section));
- REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
- BNX2X_BTR);
for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
REG_WR16(bp, BAR_XSTRORM_INTMEM +
XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
bp->stats_pending = 0;
+ bp->set_mac_pending = 0;
bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
/* HC_INDEX_U_ETH_RX_CQ_CONS */
REG_WR8(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
- HC_INDEX_U_ETH_RX_CQ_CONS),
+ U_SB_ETH_RX_CQ_INDEX),
bp->rx_ticks/12);
REG_WR16(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
- HC_INDEX_U_ETH_RX_CQ_CONS),
+ U_SB_ETH_RX_CQ_INDEX),
+ bp->rx_ticks ? 0 : 1);
+ REG_WR16(bp, BAR_USTRORM_INTMEM +
+ USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
+ U_SB_ETH_RX_BD_INDEX),
bp->rx_ticks ? 0 : 1);
/* HC_INDEX_C_ETH_TX_CQ_CONS */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
- HC_INDEX_C_ETH_TX_CQ_CONS),
+ C_SB_ETH_TX_CQ_INDEX),
bp->tx_ticks/12);
REG_WR16(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
- HC_INDEX_C_ETH_TX_CQ_CONS),
+ C_SB_ETH_TX_CQ_INDEX),
bp->tx_ticks ? 0 : 1);
}
}
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- u16 ring_prod, cqe_ring_prod = 0;
+ int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H;
+ u16 ring_prod, cqe_ring_prod;
int i, j;
bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
bp->dev->mtu + ETH_OVREHEAD);
for_each_queue(bp, j) {
- for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
- struct bnx2x_fastpath *fp = &bp->fp[j];
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+ for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("disabling TPA for queue[%d]\n", j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp, ring_prod);
- bnx2x_free_tpa_pool(bp, fp,
- ETH_MAX_AGGREGATION_QUEUES_E1H);
+ bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_sge_prod = ring_prod;
/* Allocate BDs and initialize BD ring */
- fp->rx_comp_cons = fp->rx_alloc_failed = 0;
+ fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx skbs\n", i);
- fp->rx_alloc_failed++;
+ bp->eth_stats.rx_skb_alloc_failed++;
break;
}
ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
}
context->cstorm_st_context.sb_index_number =
- HC_INDEX_C_ETH_TX_CQ_CONS;
+ C_SB_ETH_TX_CQ_INDEX;
context->cstorm_st_context.status_block_id = sb_id;
context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
int i;
tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
- tstorm_client.statistics_counter_id = 0;
+ tstorm_client.statistics_counter_id = BP_CL_ID(bp);
tstorm_client.config_flags =
TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
int func = BP_FUNC(bp);
int i;
- DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
+ DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,46 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
bnx2x_set_client_config(bp);
}
-static void bnx2x_init_internal(struct bnx2x *bp)
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ if (bp->flags & TPA_ENABLE_FLAG) {
+ struct tstorm_eth_tpa_exist tpa = {0};
+
+ tpa.tpa_exist = 1;
+
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+ ((u32 *)&tpa)[0]);
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+ ((u32 *)&tpa)[1]);
+ }
+
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+}
+
+static void bnx2x_init_internal_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+}
+
+static void bnx2x_init_internal_func(struct bnx2x *bp)
{
struct tstorm_eth_function_common_config tstorm_config = {0};
struct stats_indication_flags stats_flags = {0};
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
int i;
+ u16 max_agg_size;
if (is_multi(bp)) {
tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4654,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
(*(u32 *)&tstorm_config));
-/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
- (*(u32 *)&tstorm_config)); */
-
bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
bnx2x_set_storm_rx_mode(bp);
+ /* reset xstorm per client statistics */
+ for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+ i*4, 0);
+ }
+ /* reset tstorm per client statistics */
+ for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
+ i*4, 0);
+ }
+
+ /* Init statistics related context */
stats_flags.collect_eth = 1;
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
((u32 *)&stats_flags)[0]);
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
((u32 *)&stats_flags)[1]);
-/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
- ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+ U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+ REG_WR(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+ U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
if (CHIP_IS_E1H(bp)) {
REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4716,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
bp->e1hov);
}
- /* Zero this manualy as its initialization is
- currently missing in the initTool */
- for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_AGG_DATA_OFFSET + 4*i, 0);
-
+ /* Init CQ ring mapping and aggregation size */
+ max_agg_size = min((u32)(bp->rx_buf_use_size +
+ 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
+ (u32)0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- u16 max_agg_size;
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4730,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
U64_HI(fp->rx_comp_mapping));
- max_agg_size = min((u32)(bp->rx_buf_use_size +
- 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
- (u32)0xffff);
REG_WR16(bp, BAR_USTRORM_INTMEM +
USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
max_agg_size);
}
}
-static void bnx2x_nic_init(struct bnx2x *bp)
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ bnx2x_init_internal_common(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ bnx2x_init_internal_port(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ bnx2x_init_internal_func(bp);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
+
+static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
int i;
@@ -4717,19 +4772,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
- bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
- fp->status_blk_mapping);
+ bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
+ FP_SB_ID(fp));
+ bnx2x_update_fpsb_idx(fp);
}
- bnx2x_init_def_sb(bp, bp->def_status_blk,
- bp->def_status_blk_mapping, DEF_SB_ID);
+ bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
+ DEF_SB_ID);
+ bnx2x_update_dsb_idx(bp);
bnx2x_update_coalesce(bp);
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_ring(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_context(bp);
- bnx2x_init_internal(bp);
- bnx2x_storm_stats_init(bp);
+ bnx2x_init_internal(bp, load_code);
bnx2x_init_ind_table(bp);
bnx2x_int_enable(bp);
}
@@ -4878,7 +4934,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
REG_WR(bp, CFC_REG_DEBUG0, 0x1);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* Write 0 to parser credits for CFC search request */
REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4989,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
REG_WR(bp, CFC_REG_DEBUG0, 0x1);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
/* Write 0 to parser credits for CFC search request */
REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5056,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
REG_WR(bp, CFC_REG_DEBUG0, 0x0);
- NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
DP(NETIF_MSG_HW, "done\n");
@@ -5089,11 +5145,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif
-#ifndef BCM_ISCSI
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-
REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5214,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
}
bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
if (CHIP_IS_E1H(bp))
REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
@@ -5296,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
}
switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* Fan failure is indicated by SPIO 5 */
bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5322,16 +5376,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
enable_blocks_attention(bp);
- if (bp->flags & TPA_ENABLE_FLAG) {
- struct tstorm_eth_tpa_exist tmp = {0};
-
- tmp.tpa_exist = 1;
-
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
- ((u32 *)&tmp)[0]);
- REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
- ((u32 *)&tmp)[1]);
- }
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, bp->common.shmem_base);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
return 0;
}
@@ -5483,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
/* Port DMAE comes here */
switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+ case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
/* add SPIO 5 to group 0 */
val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -5638,18 +5689,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
int func = BP_FUNC(bp);
u32 seq = ++bp->fw_seq;
u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
- /* let the FW do it's magic ... */
- msleep(100); /* TBD */
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
- if (CHIP_REV_IS_SLOW(bp))
- msleep(900);
+ rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
- rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
- DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
+ /* Give the FW up to 2 seconds (200*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
/* is this a reply to our command? */
if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -5713,6 +5769,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
NUM_RCQ_BD);
/* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
bnx2x_fp(bp, i, rx_sge_mapping),
BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5947,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dev_kfree_skb(skb);
}
if (!fp->disable_tpa)
- bnx2x_free_tpa_pool(bp, fp,
+ bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+ ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H);
}
}
@@ -5976,8 +6034,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
bnx2x_msix_fp_int, 0,
bp->dev->name, &bp->fp[i]);
if (rc) {
- BNX2X_ERR("request fp #%d irq failed rc %d\n",
- i + offset, rc);
+ BNX2X_ERR("request fp #%d irq failed rc -%d\n",
+ i + offset, -rc);
bnx2x_free_msix_irqs(bp);
return -EBUSY;
}
@@ -6000,11 +6058,49 @@ static int bnx2x_req_irq(struct bnx2x *bp)
return rc;
}
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i)
+ napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i)
+ napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+ if (atomic_dec_and_test(&bp->intr_sem)) {
+ if (netif_running(bp->dev)) {
+ if (bp->state == BNX2X_STATE_OPEN)
+ netif_wake_queue(bp->dev);
+ bnx2x_napi_enable(bp);
+ bnx2x_int_enable(bp);
+ }
+ }
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+ bnx2x_int_disable_sync(bp);
+ if (netif_running(bp->dev)) {
+ bnx2x_napi_disable(bp);
+ netif_tx_disable(bp->dev);
+ bp->dev->trans_start = jiffies; /* prevent tx timeout */
+ }
+}
+
/*
* Init service functions
*/
-static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
int port = BP_PORT(bp);
@@ -6026,11 +6122,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
config->config_table[0].cam_entry.lsb_mac_addr =
swab16(*(u16 *)&bp->dev->dev_addr[4]);
config->config_table[0].cam_entry.flags = cpu_to_le16(port);
- config->config_table[0].target_table_entry.flags = 0;
+ if (set)
+ config->config_table[0].target_table_entry.flags = 0;
+ else
+ CAM_INVALIDATE(config->config_table[0]);
config->config_table[0].target_table_entry.client_id = 0;
config->config_table[0].target_table_entry.vlan_id = 0;
- DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
+ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
+ (set ? "setting" : "clearing"),
config->config_table[0].cam_entry.msb_mac_addr,
config->config_table[0].cam_entry.middle_mac_addr,
config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6140,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
config->config_table[1].cam_entry.flags = cpu_to_le16(port);
- config->config_table[1].target_table_entry.flags =
+ if (set)
+ config->config_table[1].target_table_entry.flags =
TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+ else
+ CAM_INVALIDATE(config->config_table[1]);
config->config_table[1].target_table_entry.client_id = 0;
config->config_table[1].target_table_entry.vlan_id = 0;
@@ -6050,12 +6153,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
-static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
+static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
struct mac_configuration_cmd_e1h *config =
(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
- if (bp->state != BNX2X_STATE_OPEN) {
+ if (set && (bp->state != BNX2X_STATE_OPEN)) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
}
@@ -6079,9 +6182,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
config->config_table[0].client_id = BP_L_ID(bp);
config->config_table[0].vlan_id = 0;
config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
- config->config_table[0].flags = BP_PORT(bp);
+ if (set)
+ config->config_table[0].flags = BP_PORT(bp);
+ else
+ config->config_table[0].flags =
+ MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
- DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
+ DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
+ (set ? "setting" : "clearing"),
config->config_table[0].msb_mac_addr,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6214,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
bnx2x_rx_int(bp->fp, 10);
/* if index is different from 0
* the reply for some commands will
- * be on the none default queue
+ * be on the non default queue
*/
if (idx)
bnx2x_rx_int(&bp->fp[idx], 10);
}
- mb(); /* state is changed by bnx2x_sp_event() */
+ mb(); /* state is changed by bnx2x_sp_event() */
if (*state_p == state)
return 0;
@@ -6167,7 +6275,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
u32 load_code;
int i, rc;
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EPERM;
@@ -6183,22 +6290,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (!BP_NOMCP(bp)) {
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
if (!load_code) {
- BNX2X_ERR("MCP response failure, unloading\n");
+ BNX2X_ERR("MCP response failure, aborting\n");
return -EBUSY;
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
return -EBUSY; /* other port in diagnostic mode */
} else {
+ int port = BP_PORT(bp);
+
DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
load_count[0]++;
- load_count[1 + BP_PORT(bp)]++;
+ load_count[1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[1 + BP_PORT(bp)] == 1)
+ else if (load_count[1 + port] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_PORT;
else
load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6356,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
- /* Disable interrupt handling until HW is initialized */
- atomic_set(&bp->intr_sem, 1);
-
if (bp->flags & USING_MSIX_FLAG) {
rc = bnx2x_req_msix_irqs(bp);
if (rc) {
@@ -6273,22 +6379,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
- goto load_error;
+ goto load_int_disable;
}
- /* Enable interrupt handling */
- atomic_set(&bp->intr_sem, 0);
-
/* Setup NIC internals and enable interrupts */
- bnx2x_nic_init(bp);
+ bnx2x_nic_init(bp, load_code);
/* Send LOAD_DONE command to MCP */
if (!BP_NOMCP(bp)) {
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
if (!load_code) {
- BNX2X_ERR("MCP response failure, unloading\n");
+ BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
- goto load_int_disable;
+ goto load_rings_free;
}
}
@@ -6298,15 +6401,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Enable Rx interrupt handling before sending the ramrod
as it's completed on Rx FP queue */
- for_each_queue(bp, i)
- napi_enable(&bnx2x_fp(bp, i, napi));
+ bnx2x_napi_enable(bp);
+
+ /* Enable interrupt handling */
+ atomic_set(&bp->intr_sem, 0);
rc = bnx2x_setup_leading(bp);
if (rc) {
-#ifdef BNX2X_STOP_ON_ERROR
- bp->panic = 1;
-#endif
- goto load_stop_netif;
+ BNX2X_ERR("Setup leading failed!\n");
+ goto load_netif_stop;
}
if (CHIP_IS_E1H(bp))
@@ -6319,13 +6422,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_multi(bp, i);
if (rc)
- goto load_stop_netif;
+ goto load_netif_stop;
}
if (CHIP_IS_E1(bp))
- bnx2x_set_mac_addr_e1(bp);
+ bnx2x_set_mac_addr_e1(bp, 1);
else
- bnx2x_set_mac_addr_e1h(bp);
+ bnx2x_set_mac_addr_e1h(bp, 1);
if (bp->port.pmf)
bnx2x_initial_phy_init(bp);
@@ -6339,7 +6442,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_OPEN:
- /* IRQ is only requested from bnx2x_open */
netif_start_queue(bp->dev);
bnx2x_set_rx_mode(bp->dev);
if (bp->flags & USING_MSIX_FLAG)
@@ -6365,21 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return 0;
-load_stop_netif:
+load_netif_stop:
+ bnx2x_napi_disable(bp);
+load_rings_free:
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
for_each_queue(bp, i)
- napi_disable(&bnx2x_fp(bp, i, napi));
-
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
bnx2x_int_disable_sync(bp);
-
/* Release IRQs */
bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i,
- RX_SGE_CNT*NUM_RX_SGE_PAGES);
load_error:
bnx2x_free_mem(bp);
@@ -6394,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
/* halt the connection */
bp->fp[index].state = BNX2X_FP_STATE_HALTING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
/* Wait for completion */
rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6411,7 +6509,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
return rc;
}
-static void bnx2x_stop_leading(struct bnx2x *bp)
+static int bnx2x_stop_leading(struct bnx2x *bp)
{
u16 dsb_sp_prod_idx;
/* if the other port is handling traffic,
@@ -6429,7 +6527,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
&(bp->fp[0].state), 1);
if (rc) /* timeout */
- return;
+ return rc;
dsb_sp_prod_idx = *bp->dsb_sp_prod;
@@ -6441,20 +6539,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
so there is not much to do if this times out
*/
while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
- msleep(1);
if (!cnt) {
DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
"dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
*bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
+#else
+ rc = -EBUSY;
#endif
break;
}
cnt--;
+ msleep(1);
}
bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+ return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6598,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
if (val)
DP(NETIF_MSG_IFDOWN,
- "BRB1 is not empty %d blooks are occupied\n", val);
+ "BRB1 is not empty %d blocks are occupied\n", val);
/* TODO: Close Doorbell port? */
}
@@ -6536,43 +6638,35 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* msut be called with rtnl_lock */
+/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
+ int port = BP_PORT(bp);
u32 reset_code = 0;
- int i, cnt;
+ int i, cnt, rc;
bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_set_storm_rx_mode(bp);
- if (netif_running(bp->dev)) {
- netif_tx_disable(bp->dev);
- bp->dev->trans_start = jiffies; /* prevent tx timeout */
- }
-
+ bnx2x_netif_stop(bp);
+ if (!netif_running(bp->dev))
+ bnx2x_napi_disable(bp);
del_timer_sync(&bp->timer);
SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- /* Wait until all fast path tasks complete */
+ /* Wait until tx fast path tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
-#ifdef BNX2X_STOP_ON_ERROR
-#ifdef __powerpc64__
- DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
- DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
-#endif
- fp->tpa_queue_used);
-#endif
cnt = 1000;
smp_rmb();
- while (bnx2x_has_work(fp)) {
- msleep(1);
+ while (BNX2X_HAS_TX_WORK(fp)) {
+
+ bnx2x_tx_int(fp, 1000);
if (!cnt) {
BNX2X_ERR("timeout waiting for queue[%d]\n",
i);
@@ -6584,40 +6678,68 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
#endif
}
cnt--;
+ msleep(1);
smp_rmb();
}
}
-
- /* Wait until all slow path tasks complete */
- cnt = 1000;
- while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
- msleep(1);
-
- for_each_queue(bp, i)
- napi_disable(&bnx2x_fp(bp, i, napi));
- /* Disable interrupts after Tx and Rx are disabled on stack level */
- bnx2x_int_disable_sync(bp);
+ /* Give HW time to discard old tx messages */
+ msleep(1);
/* Release IRQs */
bnx2x_free_irq(bp);
- if (bp->flags & NO_WOL_FLAG)
+ if (CHIP_IS_E1(bp)) {
+ struct mac_configuration_cmd *config =
+ bnx2x_sp(bp, mcast_config);
+
+ bnx2x_set_mac_addr_e1(bp, 0);
+
+ for (i = 0; i < config->hdr.length_6b; i++)
+ CAM_INVALIDATE(config->config_table[i]);
+
+ config->hdr.length_6b = i;
+ if (CHIP_REV_IS_SLOW(bp))
+ config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+ else
+ config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+ config->hdr.client_id = BP_CL_ID(bp);
+ config->hdr.reserved1 = 0;
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+ U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+ U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+ } else { /* E1H */
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ bnx2x_set_mac_addr_e1h(bp, 0);
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ }
+
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG) {
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+ if (CHIP_IS_E1H(bp))
+ REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
- else if (bp->wol) {
- u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ } else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
u32 val;
-
/* The mac address is written to entries 1-4 to
preserve entry 0 which is used by the PMF */
+ u8 entry = (BP_E1HVN(bp) + 1)*8;
+
val = (mac_addr[0] << 8) | mac_addr[1];
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
- val);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -6630,23 +6752,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
if (bnx2x_stop_multi(bp, i))
goto unload_error;
- if (CHIP_IS_E1H(bp))
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
-
- bnx2x_stop_leading(bp);
-#ifdef BNX2X_STOP_ON_ERROR
- /* If ramrod completion timed out - break here! */
- if (bp->panic) {
+ rc = bnx2x_stop_leading(bp);
+ if (rc) {
BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
return -EBUSY;
- }
+#else
+ goto unload_error;
#endif
-
- if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
- (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
- DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
- "state 0x%x fp[0].state 0x%x\n",
- bp->state, bp->fp[0].state);
}
unload_error:
@@ -6656,12 +6769,12 @@ unload_error:
DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
load_count[0]--;
- load_count[1 + BP_PORT(bp)]--;
+ load_count[1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[1 + BP_PORT(bp)] == 0)
+ else if (load_count[1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6794,7 @@ unload_error:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i,
- RX_SGE_CNT*NUM_RX_SGE_PAGES);
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
@@ -6733,49 +6845,88 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
*/
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+ if (val == 0x7)
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- /* save our func and fw_seq */
+ /* save our func */
int func = BP_FUNC(bp);
- u16 fw_seq = bp->fw_seq;
+ u32 swap_en;
+ u32 swap_val;
BNX2X_DEV_INFO("UNDI is active! reset device\n");
/* try unload UNDI on port 0 */
bp->func = 0;
- bp->fw_seq = (SHMEM_RD(bp,
- func_mb[bp->func].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
-
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
reset_code = bnx2x_fw_command(bp, reset_code);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
/* if UNDI is loaded on the other port */
if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ /* send "DONE" for previous unload */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+ /* unload UNDI on port 1 */
bp->func = 1;
- bp->fw_seq = (SHMEM_RD(bp,
- func_mb[bp->func].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
-
- bnx2x_fw_command(bp,
- DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
- bnx2x_fw_command(bp,
- DRV_MSG_CODE_UNLOAD_DONE);
-
- /* restore our func and fw_seq */
- bp->func = func;
- bp->fw_seq = fw_seq;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ bnx2x_fw_command(bp, reset_code);
}
+ REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
+ HC_REG_CONFIG_0), 0x1000);
+
+ /* close input traffic and wait for it */
+ /* Do not rcv packets to BRB */
+ REG_WR(bp,
+ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ /* Do not direct rcv packets that are not for MCP to
+ * the BRB */
+ REG_WR(bp,
+ (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ /* clear AEU */
+ REG_WR(bp,
+ (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ msleep(10);
+
+ /* save NIG port swap info */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
/* reset device */
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
- 0xd3ffff7f);
+ 0xd3ffffff);
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
0x1403);
+ /* take the NIG out of reset and restore swap values */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ MISC_REGISTERS_RESET_REG_1_RST_NIG);
+ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+ /* send unload done to the MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+ /* restore our func and fw_seq */
+ bp->func = func;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
}
}
}
@@ -6783,6 +6934,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
u32 val, val2, val3, val4, id;
+ u16 pmc;
/* Get the chip revision id and number. */
/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6992,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
BNX2X_ERR("This driver needs bc_ver %X but found %X,"
" please upgrade BC\n", BNX2X_BC_VER, val);
}
- BNX2X_DEV_INFO("%sWoL Capable\n",
- (bp->flags & NO_WOL_FLAG)? "Not " : "");
+
+ if (BP_E1HVN(bp) == 0) {
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+ } else {
+ /* no WOL capability for E1HVN != 0 */
+ bp->flags |= NO_WOL_FLAG;
+ }
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "Not " : "");
val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7434,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
- val =
- (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
+ val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK);
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->e1hov = val;
@@ -7324,7 +7483,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning rendom MAC workaround active\n");
+ BNX2X_ERR("warning random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -7337,8 +7496,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
int func = BP_FUNC(bp);
int rc;
- if (nomcp)
- bp->flags |= NO_MCP_FLAG;
+ /* Disable interrupt handling until HW is initialized */
+ atomic_set(&bp->intr_sem, 1);
mutex_init(&bp->port.phy_mutex);
@@ -7377,8 +7536,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->tx_ticks = 50;
bp->rx_ticks = 25;
- bp->stats_ticks = 1000000 & 0xffff00;
-
bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
bp->current_interval = (poll ? poll : bp->timer_interval);
@@ -7628,25 +7785,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- char phy_fw_ver[PHY_FW_VER_LEN];
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_get_ext_phy_fw_version(&bp->link_params,
(bp->state != BNX2X_STATE_CLOSED),
phy_fw_ver, PHY_FW_VER_LEN);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
- ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
+ snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8254,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
if (eeprom->magic == 0x00504859)
if (bp->port.pmf) {
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_flash_download(bp, BP_PORT(bp),
bp->link_params.ext_phy_config,
(bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8266,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
rc |= bnx2x_phy_init(&bp->link_params,
&bp->link_vars);
}
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
} else /* Only the PMF can access the PHY */
return -EINVAL;
@@ -8128,7 +8285,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs = bp->rx_ticks;
coal->tx_coalesce_usecs = bp->tx_ticks;
- coal->stats_block_coalesce_usecs = bp->stats_ticks;
return 0;
}
@@ -8146,44 +8302,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
if (bp->tx_ticks > 0x3000)
bp->tx_ticks = 0x3000;
- bp->stats_ticks = coal->stats_block_coalesce_usecs;
- if (bp->stats_ticks > 0xffff00)
- bp->stats_ticks = 0xffff00;
- bp->stats_ticks &= 0xffff00;
-
if (netif_running(dev))
bnx2x_update_coalesce(bp);
return 0;
}
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int changed = 0;
- int rc = 0;
-
- if (data & ETH_FLAG_LRO) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- } else if (dev->features & NETIF_F_LRO) {
- dev->features &= ~NETIF_F_LRO;
- bp->flags &= ~TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- if (changed && netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
-
- return rc;
-}
-
static void bnx2x_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -8266,7 +8390,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (epause->autoneg) {
if (!(bp->port.supported & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ DP(NETIF_MSG_LINK, "autoneg not supported\n");
return -EINVAL;
}
@@ -8285,6 +8409,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int changed = 0;
+ int rc = 0;
+
+ /* TPA requires Rx CSUM offloading */
+ if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ bp->flags |= TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+
+ } else if (dev->features & NETIF_F_LRO) {
+ dev->features &= ~NETIF_F_LRO;
+ bp->flags &= ~TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+
+ if (changed && netif_running(dev)) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
+
+ return rc;
+}
+
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8447,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
bp->rx_csum = data;
- return 0;
+
+ /* Disable TPA when Rx CSUM is disabled. Otherwise all
+ TPA'ed packets will be discarded due to wrong TCP CSUM */
+ if (!data) {
+ u32 flags = ethtool_op_get_flags(dev);
+
+ rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+ }
+
+ return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8497,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
{
int idx, i, rc = -ENODEV;
u32 wr_val = 0;
+ int port = BP_PORT(bp);
static const struct {
u32 offset0;
u32 offset1;
@@ -8400,7 +8563,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
- int port = BP_PORT(bp);
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
mask = reg_tbl[i].mask;
@@ -8446,16 +8608,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
static const struct {
char *name;
u32 offset;
- u32 mask;
+ u32 e1_mask;
+ u32 e1h_mask;
} prty_tbl[] = {
- { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
- { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
- { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
- { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
- { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
- { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
-
- { NULL, 0xffffffff, 0 }
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
+
+ { NULL, 0xffffffff, 0, 0 }
};
if (!netif_running(bp->dev))
@@ -8469,7 +8632,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
/* Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
- if (val & ~(prty_tbl[i].mask)) {
+ if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+ (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@@ -8482,34 +8646,6 @@ test_mem_exit:
return rc;
}
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
- int i;
-
- if (atomic_dec_and_test(&bp->intr_sem)) {
- if (netif_running(bp->dev)) {
- bnx2x_int_enable(bp);
- for_each_queue(bp, i)
- napi_enable(&bnx2x_fp(bp, i, napi));
- if (bp->state == BNX2X_STATE_OPEN)
- netif_wake_queue(bp->dev);
- }
- }
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
- int i;
-
- if (netif_running(bp->dev)) {
- netif_tx_disable(bp->dev);
- bp->dev->trans_start = jiffies; /* prevent tx timeout */
- for_each_queue(bp, i)
- napi_disable(&bnx2x_fp(bp, i, napi));
- }
- bnx2x_int_disable_sync(bp);
-}
-
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
int cnt = 1000;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (loopback_mode == BNX2X_MAC_LOOPBACK) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
- bnx2x_phy_hw_lock(bp);
+ bnx2x_acquire_phy_lock(bp);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
- bnx2x_phy_hw_unlock(bp);
+ bnx2x_release_phy_lock(bp);
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up);
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
if (!netif_running(dev))
return;
- /* offline tests are not suppoerted in MF mode */
+ /* offline tests are not supported in MF mode */
if (IS_E1HMF(bp))
etest->flags &= ~ETH_TEST_FL_OFFLINE;
@@ -8827,76 +8963,99 @@ static const struct {
long offset;
int size;
u32 flags;
- char string[ETH_GSTRING_LEN];
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+ u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
- { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
- { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
- { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
+/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
+ 8, STATS_FLAGS_FUNC, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_FUNC, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, 1, "rx_ucast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, 1, "rx_mcast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, 1, "rx_bcast_packets" },
+ 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, 1, "tx_packets" },
+ 8, STATS_FLAGS_FUNC, "tx_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
- 8, 0, "tx_mac_errors" },
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
- 8, 0, "tx_carrier_errors" },
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
- 8, 0, "rx_crc_errors" },
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
- 8, 0, "rx_align_errors" },
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
- 8, 0, "tx_single_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
- 8, 0, "tx_multi_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
- 8, 0, "tx_deferred" },
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
- 8, 0, "tx_excess_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
- 8, 0, "tx_late_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
- 8, 0, "tx_total_collisions" },
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
- 8, 0, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
- 8, 0, "rx_undersize_packets" },
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
{ STATS_OFFSET32(jabber_packets_received),
- 4, 1, "rx_oversize_packets" },
+ 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
- 8, 0, "tx_64_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
- 8, 0, "tx_65_to_127_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
- 8, 0, "tx_128_to_255_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
- 8, 0, "tx_256_to_511_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
- 8, 0, "tx_512_to_1023_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
- 8, 0, "tx_1024_to_1522_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
- 8, 0, "tx_1523_to_9022_byte_packets" },
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
- 8, 0, "rx_xon_frames" },
+ 8, STATS_FLAGS_PORT, "rx_xon_frames" },
{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
- 8, 0, "rx_xoff_frames" },
- { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
- { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
+ 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
+ { STATS_OFFSET32(tx_stat_outxonsent_hi),
+ 8, STATS_FLAGS_PORT, "tx_xon_frames" },
+ { STATS_OFFSET32(tx_stat_outxoffsent_hi),
+ 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
- 8, 0, "rx_mac_ctrl_frames" },
- { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
- { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
- { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
- { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
-/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(no_buff_discard),
+ 4, STATS_FLAGS_FUNC, "rx_discards" },
+ { STATS_OFFSET32(xxoverflow_discard),
+ 4, STATS_FLAGS_PORT, "rx_fw_discards" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "brb_truncate" },
+/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
+/* 42 */{ STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};
+#define IS_NOT_E1HMF_STAT(bp, i) \
+ (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
strcpy(buf + j*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
int i, num_stats = 0;
for (i = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
num_stats++;
}
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
int i, j;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
+ if (IS_NOT_E1HMF_STAT(bp, i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
PCI_PM_CTRL_PME_STATUS));
if (pmcsr & PCI_PM_CTRL_STATE_MASK)
- /* delay required during transition out of D3hot */
+ /* delay required during transition out of D3hot */
msleep(20);
break;
@@ -9092,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
napi);
struct bnx2x *bp = fp->bp;
int work_done = 0;
+ u16 rx_cons_sb;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -9104,17 +9264,22 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
bnx2x_update_fpsb_idx(fp);
- if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
- (fp->tx_pkt_prod != fp->tx_pkt_cons))
+ if (BNX2X_HAS_TX_WORK(fp))
bnx2x_tx_int(fp, budget);
- if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+ rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+ if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ rx_cons_sb++;
+ if (BNX2X_HAS_RX_WORK(fp))
work_done = bnx2x_rx_int(fp, budget);
- rmb(); /* bnx2x_has_work() reads the status block */
+ rmb(); /* BNX2X_HAS_WORK() reads the status block */
+ rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+ if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ rx_cons_sb++;
/* must not complete if we consumed full budget */
- if ((work_done < budget) && !bnx2x_has_work(fp)) {
+ if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
@@ -9131,7 +9296,7 @@ poll_panic:
/* we split the first BD into headers and data BDs
- * to ease the pain of our fellow micocode engineers
+ * to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
* So far this has only been observed to happen
* in Other Operating Systems(TM)
@@ -9238,7 +9403,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Check if LSO packet needs to be copied:
3 = 1 (for headers BD) + 2 (for PBD and last BD) */
int wnd_size = MAX_FETCH_BD - 3;
- /* Number of widnows to check */
+ /* Number of windows to check */
int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
int wnd_idx = 0;
int frag_idx = 0;
@@ -9327,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
fp_index = (smp_processor_id() % bp->num_queues);
fp = &bp->fp[fp_index];
- if (unlikely(bnx2x_tx_avail(bp->fp) <
- (skb_shinfo(skb)->nr_frags + 3))) {
+ if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bp->eth_stats.driver_xoff++,
netif_stop_queue(dev);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9340,7 +9504,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
- /* First, check if we need to linearaize the skb
+ /* First, check if we need to linearize the skb
(due to FW restrictions) */
if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
/* Statistics of linearization */
@@ -9349,7 +9513,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
"silently dropping this SKB\n");
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
}
@@ -9372,7 +9536,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_bd->general_data = (UNICAST_ADDRESS <<
ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
- tx_bd->general_data |= 1; /* header nbd */
+ /* header nbd */
+ tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
/* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod;
@@ -9390,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->vlan = cpu_to_le16(pkt_prod);
if (xmit_type) {
-
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd = (void *)&fp->tx_desc_ring[bd_prod];
@@ -9451,7 +9615,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
+ nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
tx_bd->nbd = cpu_to_le16(nbd);
tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
@@ -9721,9 +9885,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) {
if (CHIP_IS_E1(bp))
- bnx2x_set_mac_addr_e1(bp);
+ bnx2x_set_mac_addr_e1(bp, 1);
else
- bnx2x_set_mac_addr_e1h(bp);
+ bnx2x_set_mac_addr_e1h(bp, 1);
}
return 0;
@@ -9734,6 +9898,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct bnx2x *bp = netdev_priv(dev);
+ int port = BP_PORT(bp);
int err;
switch (cmd) {
@@ -9749,7 +9914,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
mutex_lock(&bp->port.phy_mutex);
- err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+ err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), &mii_regval);
data->val_out = mii_regval;
@@ -9765,7 +9930,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
mutex_lock(&bp->port.phy_mutex);
- err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
+ err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), data->val_in);
mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10306,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
@@ -10174,7 +10339,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
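
A sketch (not part of the patch) of what the new rx_cons_sb handling in bnx2x_poll() above is doing: each page of the receive completion queue ends in a "next page" element that never carries a completion, so a status-block index that lands on that boundary is stepped past it before being compared with the ring's consumer. The helper name is hypothetical; MAX_RCQ_DESC_CNT and the bit test are taken from the hunk.

	static inline u16 bnx2x_rcq_cons_from_sb(__le16 *rx_cons_sb)
	{
		u16 cons = le16_to_cpu(*rx_cons_sb);

		/* the last slot of every RCQ page points to the next page
		 * and never holds a completion, so skip over it */
		if ((cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
			cons++;
		return cons;
	}
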
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a99..a67b0c3 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * The registers description starts with the regsister Access type followed
+ * The registers description starts with the register Access type followed
* by size in bits. For example [RW 32]. The access types are:
* R - Read only
* RC - Clear on read
@@ -49,7 +49,7 @@
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -740,6 +740,7 @@
#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
#define HC_REG_ATTN_NUM_P0 0x108038
#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
#define HC_REG_CONFIG_0 0x108000
#define HC_REG_CONFIG_1 0x108004
#define HC_REG_FUNC_NUM_P0 0x1080ac
@@ -1372,6 +1373,23 @@
be asserted). */
#define MISC_REG_DRIVER_CONTROL_16 0xa5f0
#define MISC_REG_DRIVER_CONTROL_16_SIZE 2
+/* [RW 32] The following driver registers(1...16) represent 16 drivers and
+   32 clients. Each client can be controlled by one driver only. A one in a
+   given bit means that this driver controls the corresponding client (e.g.
+   bit 5 set means this driver controls client number 5). addr1 = set;
+   addr0 = clear; a read from either address returns the same result =
+   status. A write to address 1 requests control of all the clients whose
+   bits are set in the write command. If a client is free (the
+   corresponding bit in all the other drivers is clear) a one is written to
+   that driver register; if the client isn't free the bit remains zero. If
+   the corresponding bit is already set (the driver requests control of a
+   client it already controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW
+   interrupt is asserted. A write to address 0 requests release of all the
+   clients whose bits are set in the write command. If the corresponding
+   bit is clear (the driver requests release of a client it doesn't
+   control) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is
+   asserted. */
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
only. */
#define MISC_REG_E1HMF_MODE 0xa5f8
@@ -1394,13 +1412,13 @@
#define MISC_REG_GPIO 0xa490
/* [R 28] this field hold the last information that caused reserved
attention. bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master thatcaused the attention - according to the following
+ [27:24] the master that caused the attention - according to the following
encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
dbu; 8 = dmae */
#define MISC_REG_GRC_RSV_ATTN 0xa3c0
/* [R 28] this field hold the last information that caused timeout
attention. bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master thatcaused the attention - according to the following
+ [27:24] the master that caused the attention - according to the following
encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
dbu; 8 = dmae */
#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
@@ -1677,6 +1695,7 @@
/* [RW 8] init credit counter for port0 in LLH */
#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
/* [RW 1] send to BRB1 if no match on any of RMP rules. */
#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
/* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
for port0 */
#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
between 1024 and 1522 bytes for port0 */
#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
@@ -2298,7 +2320,7 @@
/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
-128k */
#define PXP2_REG_RQ_QM_P_SIZE 0x120050
-/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */
+/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
#define PXP2_REG_RQ_RBC_DONE 0x1201b0
/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
buffer reaches this number has_payload will be asserted */
#define PXP2_REG_WR_DMAE_MPS 0x1205ec
-/* [RW 10] if Number of entries in dmae fifo will be higer than this
+/* [RW 10] if Number of entries in dmae fifo will be higher than this
threshold then has_payload indication will be asserted; the default value
should be equal to &gt; write MBS size! */
#define PXP2_REG_WR_DMAE_TH 0x120368
@@ -2427,7 +2449,7 @@
/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
buffer reaches this number has_payload will be asserted */
#define PXP2_REG_WR_TSDM_MPS 0x1205d4
-/* [RW 10] if Number of entries in usdmdp fifo will be higer than this
+/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
threshold then has_payload indication will be asserted; the default value
should be equal to &gt; write MBS size! */
#define PXP2_REG_WR_USDMDP_TH 0x120348
@@ -3294,12 +3316,12 @@
#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4)
#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4
-/* [R 1] debug only: This bit indicates wheter indicates that external
+/* [R 1] debug only: This bit indicates whether the external
buffer was wrapped (oldest data was thrown); Relevant only when
~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124
#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1
-/* [R 1] debug only: This bit indicates wheter the internal buffer was
+/* [R 1] debug only: This bit indicates whether the internal buffer was
wrapped (oldest data was thrown) Relevant only when
~dbg_registers_debug_target=0 (internal buffer) */
#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128
@@ -4944,6 +4966,7 @@
#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
#define MISC_REGISTERS_GPIO_0 0
#define MISC_REGISTERS_GPIO_1 1
#define MISC_REGISTERS_GPIO_2 2
@@ -4959,6 +4982,7 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
@@ -4993,7 +5017,9 @@
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_8072_MDIO 0
#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_UNDI 5
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -5144,59 +5170,73 @@
#define GRCBASE_MISC_AEU GRCBASE_MISC
-/*the offset of the configuration space in the pci core register*/
+/* offset of configuration space in the pci core register */
#define PCICFG_OFFSET 0x2000
#define PCICFG_VENDOR_ID_OFFSET 0x00
#define PCICFG_DEVICE_ID_OFFSET 0x02
#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
#define PCICFG_STATUS_OFFSET 0x06
-#define PCICFG_REVESION_ID 0x08
+#define PCICFG_REVESION_ID 0x08
#define PCICFG_CACHE_LINE_SIZE 0x0c
#define PCICFG_LATENCY_TIMER 0x0d
-#define PCICFG_BAR_1_LOW 0x10
-#define PCICFG_BAR_1_HIGH 0x14
-#define PCICFG_BAR_2_LOW 0x18
-#define PCICFG_BAR_2_HIGH 0x1c
-#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
-#define PCICFG_INT_LINE 0x3c
-#define PCICFG_INT_PIN 0x3d
-#define PCICFG_PM_CSR_OFFSET 0x4c
-#define PCICFG_GRC_ADDRESS 0x78
-#define PCICFG_GRC_DATA 0x80
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
#define PCICFG_DEVICE_CONTROL 0xb4
#define PCICFG_LINK_CONTROL 0xbc
-#define PCICFG_COMMAND_IO_SPACE (1<<0)
-#define PCICFG_COMMAND_MEM_SPACE (1<<1)
-#define PCICFG_COMMAND_BUS_MASTER (1<<2)
-#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
-#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
-#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
-#define PCICFG_COMMAND_PERR_ENA (1<<6)
-#define PCICFG_COMMAND_STEPPING (1<<7)
-#define PCICFG_COMMAND_SERR_ENA (1<<8)
-#define PCICFG_COMMAND_FAST_B2B (1<<9)
-#define PCICFG_COMMAND_INT_DISABLE (1<<10)
-#define PCICFG_COMMAND_RESERVED (0x1f<<11)
-
-#define PCICFG_PM_CSR_STATE (0x3<<0)
-#define PCICFG_PM_CSR_PME_STATUS (1<<15)
#define BAR_USTRORM_INTMEM 0x400000
#define BAR_CSTRORM_INTMEM 0x410000
#define BAR_XSTRORM_INTMEM 0x420000
#define BAR_TSTRORM_INTMEM 0x430000
+/* for accessing the IGU in case of status block ACK */
#define BAR_IGU_INTMEM 0x440000
#define BAR_DOORBELL_OFFSET 0x800000
#define BAR_ME_REGISTER 0x450000
-
-#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */
-#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
@@ -5213,11 +5253,11 @@
#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
-#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
-#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
-#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
-#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
-#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
@@ -5234,46 +5274,44 @@
#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
-#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
-#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
/* config_3 offset */
-#define GRC_CONFIG_3_SIZE_REG (0x40c)
-#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
-#define PCI_CONFIG_3_FORCE_PME (1L<<24)
-#define PCI_CONFIG_3_PME_STATUS (1L<<25)
-#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
-#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
-#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
-#define PCI_CONFIG_3_PCI_POWER (1L<<31)
-
-/* config_2 offset */
-#define GRC_CONFIG_2_SIZE_REG 0x408
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
#define GRC_BAR2_CONFIG 0x4e0
-#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
-#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
-#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
-#define PCI_PM_DATA_A (0x410)
-#define PCI_PM_DATA_B (0x414)
-#define PCI_ID_VAL1 (0x434)
-#define PCI_ID_VAL2 (0x438)
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5522,6 +5560,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_GEN_CTRL 0xca10
#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
#define MDIO_PMA_REG_ROM_VER1 0xca19
#define MDIO_PMA_REG_ROM_VER2 0xca1a
#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
@@ -5576,7 +5616,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
#define MDIO_AN_REG_CL37_AN 0xffe0
-#define MDIO_AN_REG_CL37_FD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
#define IGU_FUNC_BASE 0x0400
@@ -5600,4 +5641,13 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_INT_NOP 2
#define IGU_INT_NOP2 3
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
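
The comment added above for MISC_REG_DRIVER_CONTROL_7 describes a set/clear register pair: writing a client bit to the "set" address (the register plus 4) requests ownership, a read from either address reports current ownership, and writing the same bit to the "clear" address releases it. A minimal sketch of that handshake, using the driver's REG_WR/REG_RD accessors; the helpers themselves are illustrative, not code from the patch.

	static int drv_ctl_try_acquire(struct bnx2x *bp, u32 ctl_reg, u32 client_bit)
	{
		/* address 1 (ctl_reg + 4): request control of this client */
		REG_WR(bp, ctl_reg + 4, client_bit);

		/* a read returns the ownership status */
		if (REG_RD(bp, ctl_reg) & client_bit)
			return 0;	/* granted to this driver */

		return -EBUSY;		/* held by another driver */
	}

	static void drv_ctl_release(struct bnx2x *bp, u32 ctl_reg, u32 client_bit)
	{
		/* address 0: release the client */
		REG_WR(bp, ctl_reg, client_bit);
	}
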
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index a7800e5..ec6b0af 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 19d32a2..453115a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1838,7 +1838,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if ((le16_to_cpu(rfd->command) & cb_el) &&
(RU_RUNNING == nic->ru_running))
- if (readb(&nic->csr->scb.status) & rus_no_res)
+ if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
return -ENODATA;
}
@@ -1861,7 +1861,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if ((le16_to_cpu(rfd->command) & cb_el) &&
(RU_RUNNING == nic->ru_running)) {
- if (readb(&nic->csr->scb.status) & rus_no_res)
+ if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
}
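
A brief aside on the readb() to ioread8() change above: e100 maps its control/status registers with pci_iomap(), and that cookie may describe either a port-I/O BAR or an MMIO BAR; the ioread*() accessors handle both cases, while readb() assumes MMIO. A standalone illustration of the pairing (hypothetical helper, not driver code):

	static u8 read_first_csr_byte(struct pci_dev *pdev, int bar)
	{
		void __iomem *base = pci_iomap(pdev, bar, 0);
		u8 val = 0;

		if (base) {
			/* valid whether the BAR is port I/O or MMIO */
			val = ioread8(base);
			pci_iounmap(pdev, base);
		}
		return val;
	}
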
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index b9f90a5..213437d 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -208,7 +208,7 @@ struct e1000_option {
} r;
struct { /* list_option info */
int nr;
- struct e1000_opt_list { int i; char *str; } *p;
+ const struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
@@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
break;
case list_option: {
int i;
- struct e1000_opt_list *ent;
+ const struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
@@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
void __devinit e1000_check_options(struct e1000_adapter *adapter)
{
+ struct e1000_option opt;
int bd = adapter->bd_number;
+
if (bd >= E1000_MAX_NIC) {
DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd);
@@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Descriptor Count */
- struct e1000_option opt = {
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_TXD),
.def = E1000_DEFAULT_TXD,
- .arg = { .r = { .min = E1000_MIN_TXD }}
+ .arg = { .r = {
+ .min = E1000_MIN_TXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+ }}
};
- struct e1000_tx_ring *tx_ring = adapter->tx_ring;
- int i;
- e1000_mac_type mac_type = adapter->hw.mac_type;
- opt.arg.r.max = mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD;
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
@@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
- struct e1000_option opt = {
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_RXD),
.def = E1000_DEFAULT_RXD,
- .arg = { .r = { .min = E1000_MIN_RXD }}
+ .arg = { .r = {
+ .min = E1000_MIN_RXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ }}
};
- struct e1000_rx_ring *rx_ring = adapter->rx_ring;
- int i;
- e1000_mac_type mac_type = adapter->hw.mac_type;
- opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
- E1000_MAX_82544_RXD;
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
@@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = enable_option,
.name = "Checksum Offload",
.err = "defaulting to Enabled",
@@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
{ E1000_FC_FULL, "Flow Control Enabled" },
{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
@@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Interrupt Delay */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
@@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TADV),
@@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
@@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RADV),
@@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of " __MODULE_STRING(DEFAULT_ITR),
@@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{
+ struct e1000_option opt;
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
- struct e1000_opt_list speed_list[] = {{ 0, "" },
- { SPEED_10, "" },
- { SPEED_100, "" },
- { SPEED_1000, "" }};
+ static const struct e1000_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }};
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = list_option,
.name = "Speed",
.err = "parameter ignored",
@@ -604,11 +612,12 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
}
}
{ /* Duplex */
- struct e1000_opt_list dplx_list[] = {{ 0, "" },
- { HALF_DUPLEX, "" },
- { FULL_DUPLEX, "" }};
+ static const struct e1000_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }};
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = list_option,
.name = "Duplex",
.err = "parameter ignored",
@@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
"parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
- struct e1000_opt_list an_list[] =
+ static const struct e1000_opt_list an_list[] =
#define AA "AutoNeg advertising "
{{ 0x01, AA "10/HD" },
{ 0x02, AA "10/FD" },
@@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{ 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
{ 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
- struct e1000_option opt = {
+ opt = (struct e1000_option) {
.type = list_option,
.name = "AutoNeg",
.err = "parameter ignored",
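
The e1000_param.c rework above leans on C99 compound literals: one function-scope `opt` is re-initialised for every option block instead of declaring a fresh aggregate in each block, and the range maximum can now be computed inline in the initialiser. A reduced illustration with made-up types and values:

	struct option_desc {
		const char *name;
		int def;
		struct { int min, max; } range;
	};

	static void check_two_options(int new_mac)
	{
		struct option_desc opt;

		opt = (struct option_desc) {
			.name  = "Transmit Descriptors",
			.def   = 256,
			.range = { .min = 80, .max = new_mac ? 4096 : 256 },
		};
		/* ... validate the transmit option against opt ... */

		opt = (struct option_desc) {
			.name  = "Receive Descriptors",
			.def   = 256,
			.range = { .min = 80, .max = new_mac ? 4096 : 256 },
		};
		/* ... validate the receive option against opt ... */
	}
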
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index f823b8b..14b0e6c 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -389,7 +389,7 @@
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
/* Transmit Descriptor Control */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cf57050..ac4e506 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -326,6 +326,7 @@ struct e1000_info {
#define FLAG_RX_CSUM_ENABLED (1 << 28)
#define FLAG_TSO_FORCE (1 << 29)
#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_MSI_TEST_FAILED (1 << 31)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index cf9679f..e21c9e0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -177,7 +177,7 @@ static u32 e1000_get_link(struct net_device *netdev)
u32 status;
status = er32(STATUS);
- return (status & E1000_STATUS_LU);
+ return (status & E1000_STATUS_LU) ? 1 : 0;
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 05b0b2f..d266510 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -510,9 +510,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data - NET_IP_ALIGN,
- skb->data - NET_IP_ALIGN,
- length + NET_IP_ALIGN);
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
@@ -1233,26 +1236,36 @@ static irqreturn_t e1000_intr(int irq, void *data)
return IRQ_HANDLED;
}
+/**
+ * e1000_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irq_handler_t handler = e1000_intr;
int irq_flags = IRQF_SHARED;
int err;
- if (!pci_enable_msi(adapter->pdev)) {
- adapter->flags |= FLAG_MSI_ENABLED;
- handler = e1000_intr_msi;
- irq_flags = 0;
+ if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->flags |= FLAG_MSI_ENABLED;
+ irq_flags = 0;
+ }
}
- err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
- netdev);
+ err = request_irq(adapter->pdev->irq,
+ ((adapter->flags & FLAG_MSI_ENABLED) ?
+ &e1000_intr_msi : &e1000_intr),
+ irq_flags, netdev->name, netdev);
if (err) {
- e_err("Unable to allocate %s interrupt (return: %d)\n",
- adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err);
- if (adapter->flags & FLAG_MSI_ENABLED)
+ if (adapter->flags & FLAG_MSI_ENABLED) {
pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~FLAG_MSI_ENABLED;
+ }
+ e_err("Unable to allocate interrupt, Error: %d\n", err);
}
return err;
@@ -2592,6 +2605,135 @@ err:
}
/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ e_dbg("%s: icr is %08X\n", netdev->name, icr);
+ if (icr & E1000_ICR_RXSEQ) {
+ adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* poll_enable hasn't been called yet, so don't need disable */
+ /* clear any pending events */
+ er32(ICR);
+
+ /* free the real vector and request a test handler */
+ e1000_free_irq(adapter);
+
+ /* Assume that the test fails; if it succeeds, the test
+ * MSI irq handler will unset this flag */
+ adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ goto msi_test_failed;
+
+ err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+ netdev->name, netdev);
+ if (err) {
+ pci_disable_msi(adapter->pdev);
+ goto msi_test_failed;
+ }
+
+ wmb();
+
+ e1000_irq_enable(adapter);
+
+ /* fire an unusual interrupt on the test handler */
+ ew32(ICS, E1000_ICS_RXSEQ);
+ e1e_flush();
+ msleep(50);
+
+ e1000_irq_disable(adapter);
+
+ rmb();
+
+ if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+ err = -EIO;
+ e_info("MSI interrupt test failed!\n");
+ }
+
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+
+ if (err == -EIO)
+ goto msi_test_failed;
+
+ /* okay so the test worked, restore settings */
+ e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
+msi_test_failed:
+ /* restore the original vector, even if it failed */
+ e1000_request_irq(adapter);
+ return err;
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!(adapter->flags & FLAG_MSI_ENABLED))
+ return 0;
+
+ /* disable SERR in case the MSI write causes a master abort */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = e1000_test_msi_interrupt(adapter);
+
+ /* restore previous setting of command word */
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+
+ /* success ! */
+ if (!err)
+ return 0;
+
+ /* EIO means MSI test failed */
+ if (err != -EIO)
+ return err;
+
+ /* back to INTx mode */
+ e_warn("MSI interrupt test failed, using legacy interrupt.\n");
+
+ e1000_free_irq(adapter);
+
+ err = e1000_request_irq(adapter);
+
+ return err;
+}
+
+/**
* e1000_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2649,6 +2791,19 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ /*
+ * Work around PCIe errata with MSI interrupts causing some chipsets to
+ * ignore e1000e MSI messages, which means we need to test our MSI
+ * interrupt now
+ */
+ {
+ err = e1000_test_msi(adapter);
+ if (err) {
+ e_err("Interrupt allocation failed\n");
+ goto err_req_irq;
+ }
+ }
+
/* From here on the code is the same as e1000e_up() */
clear_bit(__E1000_DOWN, &adapter->state);
@@ -3055,7 +3210,7 @@ static void e1000_watchdog_task(struct work_struct *work)
case SPEED_10:
txb2b = 0;
netdev->tx_queue_len = 10;
- adapter->tx_timeout_factor = 14;
+ adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = 0;
@@ -3721,7 +3876,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
struct e1000_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
e_err("Invalid MTU setting\n");
return -EINVAL;
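
One detail of the MSI self-test added above, shown in isolation: the PCI command word is saved, SERR reporting is masked while the test interrupt runs (the write can master-abort on the affected chipsets), and the original value is restored afterwards. The wrapper below is a hypothetical condensation, not code from the patch:

	static int with_serr_masked(struct pci_dev *pdev,
				    int (*op)(struct pci_dev *pdev))
	{
		u16 cmd;
		int err;

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_SERR);

		err = op(pdev);		/* e.g. fire and check the test interrupt */

		/* restore the previous setting of the command word */
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
		return err;
	}
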
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 8effc31..ed912e0 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -324,14 +324,27 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr = 20000;
break;
default:
- e1000_validate_option(&adapter->itr, &opt,
- adapter);
/*
- * save the setting, because the dynamic bits
- * change itr. clear the lower two bits
- * because they are used as control
+ * Save the setting, because the dynamic bits
+ * change itr.
*/
- adapter->itr_setting = adapter->itr & ~3;
+ if (e1000_validate_option(&adapter->itr, &opt,
+ adapter) &&
+ (adapter->itr == 3)) {
+ /*
+ * In case of invalid user value,
+ * default to conservative mode.
+ */
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ } else {
+ /*
+ * Clear the lower two bits because
+ * they are used as control.
+ */
+ adapter->itr_setting =
+ adapter->itr & ~3;
+ }
break;
}
} else {
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 053971e..331b86b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5522,7 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_CHECKSUM) {
np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
}
@@ -5835,7 +5835,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
- dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
+ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9a51ec8..9d46182 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -792,6 +792,10 @@ static int fs_enet_open(struct net_device *dev)
int r;
int err;
+ /* to initialize the fep->cur_rx,... */
+ /* not doing this will cause a crash in fs_enet_rx_napi */
+ fs_init_bds(fep->ndev);
+
if (fep->fpi->use_napi)
napi_enable(&fep->napi);
@@ -1167,6 +1171,10 @@ static struct of_device_id fs_enet_match[] = {
.compatible = "fsl,cpm1-scc-enet",
.data = (void *)&fs_scc_ops,
},
+ {
+ .compatible = "fsl,cpm2-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
{
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 029b3c7e..22f50dd 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -47,7 +47,6 @@
#include "fs_enet.h"
/*************************************************/
-
#if defined(CONFIG_CPM1)
/* for a 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x) __raw_writel(x, addr)
@@ -62,6 +61,8 @@
#define __fs_out16(addr, x) out_be16(addr, x)
#define __fs_in32(addr) in_be32(addr)
#define __fs_in16(addr) in_be16(addr)
+#define __fs_out8(addr, x) out_8(addr, x)
+#define __fs_in8(addr) in_8(addr)
#endif
/* write, read, set bits, clear bits */
@@ -262,8 +263,13 @@ static void restart(struct net_device *dev)
/* Initialize function code registers for big-endian.
*/
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
+#else
W8(ep, sen_genscc.scc_rfcr, SCC_EB);
W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+#endif
/* Set maximum bytes per receive buffer.
* This appears to be an Ethernet frame size, not the buffer
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ca6cf6e..4320a98 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -105,6 +105,7 @@ const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
@@ -134,9 +135,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l
static void gfar_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
-#ifdef CONFIG_PM
static void gfar_halt_nodisable(struct net_device *dev);
-#endif
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
@@ -211,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);
spin_lock_init(&priv->rxlock);
spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(pdev, dev);
@@ -631,7 +631,6 @@ static void init_registers(struct net_device *dev)
}
-#ifdef CONFIG_PM
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
@@ -657,7 +656,6 @@ static void gfar_halt_nodisable(struct net_device *dev)
cpu_relax();
}
}
-#endif
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
@@ -666,6 +664,8 @@ void gfar_halt(struct net_device *dev)
struct gfar __iomem *regs = priv->regs;
u32 tempval;
+ gfar_halt_nodisable(dev);
+
/* Disable Rx and Tx */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
@@ -1214,6 +1214,7 @@ static int gfar_close(struct net_device *dev)
napi_disable(&priv->napi);
+ cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
/* Disconnect from the PHY */
@@ -1328,13 +1329,16 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-/* gfar_timeout gets called when a packet has not been
+/* gfar_reset_task gets scheduled when a packet has not been
* transmitted after a set amount of time.
* For now, assume that clearing out all the structures, and
- * starting over will fix the problem. */
-static void gfar_timeout(struct net_device *dev)
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
{
- dev->stats.tx_errors++;
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+ struct net_device *dev = priv->dev;
if (dev->flags & IFF_UP) {
stop_gfar(dev);
@@ -1344,6 +1348,14 @@ static void gfar_timeout(struct net_device *dev)
netif_tx_schedule_all(dev);
}
+static void gfar_timeout(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+ schedule_work(&priv->reset_task);
+}
+
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
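
The gianfar change above is the usual deferral pattern for Tx timeouts: ndo_tx_timeout runs in atomic context, so the actual reset (which can sleep in stop_gfar()) is pushed to a work item that is set up with INIT_WORK() at probe time and flushed with cancel_work_sync() on close. A stripped-down sketch of the same shape, with illustrative names:

	struct my_priv {
		struct net_device *dev;
		struct work_struct reset_task;	/* INIT_WORK()ed at probe time */
	};

	static void my_reset_task(struct work_struct *work)
	{
		struct my_priv *priv = container_of(work, struct my_priv,
						    reset_task);

		if (priv->dev->flags & IFF_UP) {
			/* stop, re-initialise and restart the hardware; this
			 * runs in process context and is allowed to sleep */
		}
	}

	static void my_tx_timeout(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		dev->stats.tx_errors++;
		schedule_work(&priv->reset_task);	/* defer out of atomic context */
	}
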
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index d59df98..f46e9b6 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -756,6 +756,7 @@ struct gfar_private {
uint32_t msg_enable;
+ struct work_struct reset_task;
/* Network Statistics */
struct gfar_extra_stats extra_stats;
};
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 5116f68..782c201 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -33,7 +33,6 @@
#include <asm/uaccess.h>
#include <linux/module.h>
-#include <linux/version.h>
#include "gianfar.h"
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 2e720f2..ccd9d90 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -663,9 +663,6 @@ static int emac_configure(struct emac_instance *dev)
if (emac_phy_gpcs(dev->phy.mode))
emac_mii_reset_phy(&dev->phy);
- /* Required for Pause packet support in EMAC */
- dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
-
return 0;
}
@@ -1150,6 +1147,9 @@ static int emac_open(struct net_device *ndev)
} else
netif_carrier_on(dev->ndev);
+ /* Required for Pause packet support in EMAC */
+ dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+
emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a03fe1f..c2d57f8 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long data_dma_addr;
desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
- data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
+ data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
ibmveth_error_printk("tx: unable to map xmit buffer\n");
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc.fields.address = adapter->bounce_buffer_dma;
tx_map_failed++;
used_bounce = 1;
+ wmb();
} else
desc.fields.address = data_dma_addr;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index bb823ac..f5e2e72 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -87,7 +87,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
mac->type = e1000_82576;
break;
default:
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index a65ccc3..99504a6 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,7 +41,6 @@ struct e1000_hw;
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
-#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 11aee13..58906c9 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -373,13 +373,17 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[12] = rd32(E1000_EECD);
/* Interrupt */
- regs_buff[13] = rd32(E1000_EICR);
+ /* Reading EICS for EICR because they read the
+ * same but EICS does not clear on read */
+ regs_buff[13] = rd32(E1000_EICS);
regs_buff[14] = rd32(E1000_EICS);
regs_buff[15] = rd32(E1000_EIMS);
regs_buff[16] = rd32(E1000_EIMC);
regs_buff[17] = rd32(E1000_EIAC);
regs_buff[18] = rd32(E1000_EIAM);
- regs_buff[19] = rd32(E1000_ICR);
+ /* Reading ICS for ICR because they read the
+ * same but ICS does not clear on read */
+ regs_buff[19] = rd32(E1000_ICS);
regs_buff[20] = rd32(E1000_ICS);
regs_buff[21] = rd32(E1000_IMS);
regs_buff[22] = rd32(E1000_IMC);
@@ -1746,15 +1750,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* return success for non excluded adapter ports */
retval = 0;
break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 8f66e15..634c4c9 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -61,7 +61,6 @@ static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -521,7 +520,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
- return;
+ goto out;
igb_reset_interrupt_capability(adapter);
@@ -531,7 +530,7 @@ msi_only:
adapter->num_tx_queues = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
-
+out:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
return;
@@ -1217,16 +1216,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- /* if quad port adapter, disable WoL on all but port A */
- if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
- else
- adapter->flags |= IGB_FLAG_QUAD_PORT_A;
- /* Reset for multiple quad port adapters */
- if (++global_quad_port_a == 4)
- global_quad_port_a = 0;
- break;
}
/* initialize the wol settings based on the eeprom settings */
@@ -2290,7 +2279,9 @@ static void igb_watchdog_task(struct work_struct *work)
struct igb_ring *tx_ring = adapter->tx_ring;
struct e1000_mac_info *mac = &adapter->hw.mac;
u32 link;
+ u32 eics = 0;
s32 ret_val;
+ int i;
if ((netif_carrier_ok(netdev)) &&
(rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2392,7 +2383,13 @@ link_up:
}
/* Cause software interrupt to ensure rx ring is cleaned */
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ if (adapter->msix_entries) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ eics |= adapter->rx_ring[i].eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
/* Force detection of hung controller every watchdog period */
tx_ring->detect_tx_hung = true;
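
A small sketch of the watchdog change above, assuming a toy ring layout: under MSI-X each RX queue owns one bit (eims_value) in the extended interrupt registers, so the software interrupt is raised by OR-ing those bits into one mask and writing it to EICS instead of the legacy ICS register.

#include <stdint.h>
#include <stdio.h>

struct toy_rx_ring {
	uint32_t eims_value;		/* this queue's bit in EIMS/EICS */
};

int main(void)
{
	struct toy_rx_ring rx_ring[4] = {
		{ 1u << 0 }, { 1u << 1 }, { 1u << 2 }, { 1u << 3 },
	};
	uint32_t eics = 0;
	int i;

	for (i = 0; i < 4; i++)
		eics |= rx_ring[i].eims_value;

	/* the driver writes this mask to E1000_EICS to kick all RX vectors */
	printf("EICS mask: 0x%08x\n", eics);
	return 0;
}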
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index e0e718a..dd9318f 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -7,7 +7,6 @@
#ifndef __LINUX_IPG_H
#define __LINUX_IPG_H
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -21,7 +20,6 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/version.h>
#include <asm/bitops.h>
/*
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 2f38e84..f96358b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -190,6 +190,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82598AT_DUAL_PORT:
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e5f3da8..53f41b6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "1.3.18-k2"
+#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2007 Intel Corporation.";
@@ -72,6 +72,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
+ board_82598 },
/* required last entry */
{0, }
@@ -1634,16 +1636,17 @@ static void ixgbe_set_multi(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
- u32 fctrl;
+ u32 fctrl, vlnctrl;
int i;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (netdev->flags & IFF_PROMISC) {
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- fctrl &= ~IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
@@ -1651,10 +1654,11 @@ static void ixgbe_set_multi(struct net_device *netdev)
} else {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
- fctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
if (netdev->mc_count) {
mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 1ad7cb9..c0282a2 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -39,6 +39,7 @@
#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
/* General Registers */
#define IXGBE_CTRL 0x00000
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 49f6bc0..3b43bfd 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,68 +64,6 @@ struct pcpu_lstats {
unsigned long bytes;
};
-/* KISS: just allocate small chunks and copy bits.
- *
- * So, in fact, this is documentation, explaining what we expect
- * of largesending device modulo TCP checksum, which is ignored for loopback.
- */
-
-#ifdef LOOPBACK_TSO
-static void emulate_large_send_offload(struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
- struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
- (iph->ihl * 4));
- unsigned int doffset = (iph->ihl + th->doff) * 4;
- unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
- unsigned int offset = 0;
- u32 seq = ntohl(th->seq);
- u16 id = ntohs(iph->id);
-
- while (offset + doffset < skb->len) {
- unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
- struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
-
- if (!nskb)
- break;
- skb_reserve(nskb, 32);
- skb_set_mac_header(nskb, -ETH_HLEN);
- skb_reset_network_header(nskb);
- iph = ip_hdr(nskb);
- skb_copy_to_linear_data(nskb, skb_network_header(skb),
- doffset);
- if (skb_copy_bits(skb,
- doffset + offset,
- nskb->data + doffset,
- frag_size))
- BUG();
- skb_put(nskb, doffset + frag_size);
- nskb->ip_summed = CHECKSUM_UNNECESSARY;
- nskb->dev = skb->dev;
- nskb->priority = skb->priority;
- nskb->protocol = skb->protocol;
- nskb->dst = dst_clone(skb->dst);
- memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
- nskb->pkt_type = skb->pkt_type;
-
- th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
- iph->tot_len = htons(frag_size + doffset);
- iph->id = htons(id);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
- th->seq = htonl(seq);
- if (offset + doffset + frag_size < skb->len)
- th->fin = th->psh = 0;
- netif_rx(nskb);
- offset += frag_size;
- seq += frag_size;
- id++;
- }
-
- dev_kfree_skb(skb);
-}
-#endif /* LOOPBACK_TSO */
-
/*
* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
@@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
skb->protocol = eth_type_trans(skb,dev);
-#ifndef LOOPBACK_MUST_CHECKSUM
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
#ifdef LOOPBACK_TSO
if (skb_is_gso(skb)) {
@@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
-#ifdef LOOPBACK_TSO
| NETIF_F_TSO
-#endif
| NETIF_F_NO_CSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 46819af..0a18b9e 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,7 @@
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
-static char mv643xx_eth_driver_version[] = "1.2";
+static char mv643xx_eth_driver_version[] = "1.3";
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
@@ -474,11 +474,19 @@ static void rxq_refill(struct rx_queue *rxq)
/*
* Reserve 2+14 bytes for an ethernet header (the
* hardware automatically prepends 2 bytes of dummy
- * data to each received packet), 4 bytes for a VLAN
- * header, and 4 bytes for the trailing FCS -- 24
- * bytes total.
+ * data to each received packet), 16 bytes for up to
+ * four VLAN tags, and 4 bytes for the trailing FCS
+ * -- 36 bytes total.
*/
- skb_size = mp->dev->mtu + 24;
+ skb_size = mp->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8
+ * bytes, as the lower three bits of the receive
+ * descriptor's buffer size field are ignored by
+ * the hardware.
+ */
+ skb_size = (skb_size + 7) & ~7;
skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
if (skb == NULL)
@@ -509,10 +517,8 @@ static void rxq_refill(struct rx_queue *rxq)
skb_reserve(skb, 2);
}
- if (rxq->rx_desc_count != rxq->rx_ring_size) {
- rxq->rx_oom.expires = jiffies + (HZ / 10);
- add_timer(&rxq->rx_oom);
- }
+ if (rxq->rx_desc_count != rxq->rx_ring_size)
+ mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
spin_unlock_irqrestore(&mp->lock, flags);
}
@@ -529,7 +535,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
int rx;
rx = 0;
- while (rx < budget) {
+ while (rx < budget && rxq->rx_desc_count) {
struct rx_desc *rx_desc;
unsigned int cmd_sts;
struct sk_buff *skb;
@@ -554,7 +560,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
spin_unlock_irqrestore(&mp->lock, flags);
dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
- mp->dev->mtu + 24, DMA_FROM_DEVICE);
+ rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;
@@ -636,9 +642,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
txq_reclaim(mp->txq + i, 0);
if (netif_carrier_ok(mp->dev)) {
- spin_lock(&mp->lock);
+ spin_lock_irq(&mp->lock);
__txq_maybe_wake(mp->txq + mp->txq_primary);
- spin_unlock(&mp->lock);
+ spin_unlock_irq(&mp->lock);
}
}
#endif
@@ -650,8 +656,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (rx < budget) {
netif_rx_complete(mp->dev, napi);
- wrl(mp, INT_CAUSE(mp->port_num), 0);
- wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
@@ -1796,6 +1800,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
*/
#ifdef MV643XX_ETH_NAPI
if (int_cause & INT_RX) {
+ wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
wrl(mp, INT_MASK(mp->port_num), 0x00000000);
rdl(mp, INT_MASK(mp->port_num));
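
Worked example of the rxq_refill() sizing change above: the receive buffer now allows for 2 bytes of hardware padding, a 14-byte Ethernet header, 16 bytes for up to four VLAN tags and a 4-byte FCS on top of the MTU, then rounds up to a multiple of 8 because the hardware ignores the low three bits of the descriptor buffer size.

#include <stdio.h>

int main(void)
{
	const unsigned int mtus[] = { 1500, 1522, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned int skb_size = mtus[i] + 36;	/* 2 + 14 + 16 + 4 */

		skb_size = (skb_size + 7) & ~7;		/* round up to 8 bytes */
		printf("mtu %4u -> buffer %4u bytes\n", mtus[i], skb_size);
	}
	return 0;
}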
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f1de38f..d6524db 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -56,7 +56,6 @@
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
@@ -76,7 +75,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.3.99-1.347"
+#define MYRI10GE_VERSION_STR "1.4.3-1.358"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -3548,7 +3547,11 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
- if (old_fw == myri10ge_fw_aligned)
+ if (myri10ge_fw_name != NULL) {
+ dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
+ myri10ge_fw_name);
+ mgp->fw_name = myri10ge_fw_name;
+ } else if (old_fw == myri10ge_fw_aligned)
mgp->fw_name = myri10ge_fw_rss_aligned;
else
mgp->fw_name = myri10ge_fw_rss_unaligned;
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 42443d6..fa3ceca 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -118,7 +118,7 @@ bad_clone_list[] __initdata = {
{"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
-#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+#ifdef CONFIG_MACH_TX49XX
{"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
#endif
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
@@ -142,7 +142,7 @@ bad_clone_list[] __initdata = {
#if defined(CONFIG_PLAT_MAPPI)
# define DCR_VAL 0x4b
#elif defined(CONFIG_PLAT_OAKS32R) || \
- defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+ defined(CONFIG_MACH_TX49XX)
# define DCR_VAL 0x48 /* 8-bit mode */
#else
# define DCR_VAL 0x49
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 93a7b9b..244ab49 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -45,7 +45,6 @@
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
-#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
@@ -66,8 +65,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 0
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.0"
+#define _NETXEN_NIC_LINUX_SUBVERSION 11
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.11"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
@@ -1615,7 +1614,8 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
int netxen_is_flash_supported(struct netxen_adapter *adapter);
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 4ad3e08..b974ca0 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -38,7 +38,6 @@
#include <asm/io.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
-#include <linux/version.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index e8e8d73..e80f9e3 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -32,8 +32,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
-
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <linux/init.h>
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 9aa20f9..84978f8 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -733,31 +733,56 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
return 0;
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[])
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
{
- __le32 *pmac = (__le32 *) & mac[0];
+ __le32 *pmac = (__le32 *) mac;
+ u32 offset;
- if (netxen_get_flash_block(adapter,
- NETXEN_USER_START +
- offsetof(struct netxen_new_user_info,
- mac_addr),
- FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) {
+ offset = NETXEN_USER_START +
+ offsetof(struct netxen_new_user_info, mac_addr) +
+ adapter->portnum * sizeof(u64);
+
+ if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
return -1;
- }
+
if (*mac == cpu_to_le64(~0ULL)) {
+
+ offset = NETXEN_USER_START_OLD +
+ offsetof(struct netxen_user_old_info, mac_addr) +
+ adapter->portnum * sizeof(u64);
+
if (netxen_get_flash_block(adapter,
- NETXEN_USER_START_OLD +
- offsetof(struct netxen_user_old_info,
- mac_addr),
- FLASH_NUM_PORTS * sizeof(u64),
- pmac) == -1)
+ offset, sizeof(u64), pmac) == -1)
return -1;
+
if (*mac == cpu_to_le64(~0ULL))
return -1;
}
return 0;
}
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+{
+ uint32_t crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
+ adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);
+
+ mac_hi = cpu_to_le32(mac_hi);
+ mac_lo = cpu_to_le32(mac_lo);
+
+ if (pci_func & 1)
+ *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = ((mac_lo) | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
#define CRB_WIN_LOCK_TIMEOUT 100000000
static int crb_win_lock(struct netxen_adapter *adapter)
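
The offset arithmetic in the new netxen_p3_get_mac_addr() above implies that each pair of PCI functions shares a three-dword CRB block: the even function's MAC occupies the first word plus the low half of the middle word, and the odd function's MAC starts in the high half of that middle word, which is why odd functions shift mac_lo right by 16. A standalone sketch of the resulting offsets, with CRB_MAC_BLOCK_START taken as 0 purely for illustration:

#include <stdio.h>

int main(void)
{
	int func;

	for (func = 0; func < 8; func++) {
		unsigned int off = (4 * ((func / 2) * 3)) + (4 * (func & 1));

		printf("pci_func %d: mac_lo at +%2u, mac_hi at +%2u\n",
		       func, off, off + 4);
	}
	return 0;
}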
@@ -2183,10 +2208,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
if (adapter->portnum == 0) {
get_brd_name_by_type(board_info->board_type, brd_name);
- printk("NetXen %s Board S/N %s Chip id 0x%x\n",
- brd_name, serial_num, board_info->chip_id);
- printk("NetXen Firmware version %d.%d.%d\n", fw_major,
- fw_minor, fw_build);
+ printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n",
+ brd_name, serial_num, adapter->ahw.revision_id);
+ printk(KERN_INFO "NetXen Firmware version %d.%d.%d\n",
+ fw_major, fw_minor, fw_build);
}
if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) <
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 519fc86..5bba675 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1079,10 +1079,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
- int i;
+ int i = 100;
+
+ if (!adapter->dummy_dma.addr)
+ return;
- if (adapter->dummy_dma.addr) {
- i = 100;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
do {
if (dma_watchdog_shutdown_request(adapter) == 1)
break;
@@ -1090,17 +1092,17 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter)
if (dma_watchdog_shutdown_poll_result(adapter) == 1)
break;
} while (--i);
+ }
- if (i) {
- pci_free_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- adapter->dummy_dma.addr,
- adapter->dummy_dma.phys_addr);
- adapter->dummy_dma.addr = NULL;
- } else {
- printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
- adapter->netdev->name);
- }
+ if (i) {
+ pci_free_consistent(adapter->pdev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = NULL;
+ } else {
+ printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
+ adapter->netdev->name);
}
}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 7615c71..32bb47a 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -149,76 +149,18 @@ static uint32_t msi_tgt_status[8] = {
static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
-static void netxen_nic_disable_int(struct netxen_adapter *adapter)
+static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
{
- u32 mask = 0x7ff;
- int retries = 32;
- int pci_fn = adapter->ahw.pci_func;
-
- if (adapter->msi_mode != MSI_MODE_MULTIFUNC)
- adapter->pci_write_normalize(adapter,
- adapter->crb_intr_mask, 0);
-
- if (adapter->intr_scheme != -1 &&
- adapter->intr_scheme != INTR_SCHEME_PERPORT)
- adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
-
- if (!NETXEN_IS_MSI_FAMILY(adapter)) {
- do {
- adapter->pci_write_immediate(adapter,
- adapter->legacy_intr.tgt_status_reg,
- 0xffffffff);
- mask = adapter->pci_read_immediate(adapter,
- ISR_INT_VECTOR);
- if (!(mask & 0x80))
- break;
- udelay(10);
- } while (--retries);
-
- if (!retries) {
- printk(KERN_NOTICE "%s: Failed to disable interrupt\n",
- netxen_nic_driver_name);
- }
- } else {
- if (adapter->msi_mode == MSI_MODE_MULTIFUNC) {
- adapter->pci_write_immediate(adapter,
- msi_tgt_status[pci_fn], 0xffffffff);
- }
- }
+ adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0);
}
-static void netxen_nic_enable_int(struct netxen_adapter *adapter)
+static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
{
- u32 mask;
-
- if (adapter->intr_scheme != -1 &&
- adapter->intr_scheme != INTR_SCHEME_PERPORT) {
- switch (adapter->ahw.board_type) {
- case NETXEN_NIC_GBE:
- mask = 0x77b;
- break;
- case NETXEN_NIC_XGBE:
- mask = 0x77f;
- break;
- default:
- mask = 0x7ff;
- break;
- }
-
- adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
- }
-
adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
- if (!NETXEN_IS_MSI_FAMILY(adapter)) {
- mask = 0xbff;
- if (adapter->intr_scheme == INTR_SCHEME_PERPORT)
- adapter->pci_write_immediate(adapter,
- adapter->legacy_intr.tgt_mask_reg, mask);
- else
- adapter->pci_write_normalize(adapter,
- CRB_INT_VECTOR, 0);
- }
+ if (!NETXEN_IS_MSI_FAMILY(adapter))
+ adapter->pci_write_immediate(adapter,
+ adapter->legacy_intr.tgt_mask_reg, 0xfbff);
}
static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
@@ -501,6 +443,44 @@ static void netxen_init_msix_entries(struct netxen_adapter *adapter)
adapter->msix_entries[i].entry = i;
}
+static int
+netxen_read_mac_addr(struct netxen_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ __le64 mac_addr;
+ DECLARE_MAC_BUF(mac);
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (netxen_is_flash_supported(adapter) != 0)
+ return -EIO;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ } else {
+ if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ }
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ dev_warn(&pdev->dev, "Bad MAC address %s.\n",
+ print_mac(mac, netdev->dev_addr));
+ } else
+ adapter->macaddr_set(adapter, netdev->dev_addr);
+
+ return 0;
+}
+
/*
* netxen_nic_probe()
*
@@ -529,10 +509,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0;
int i = 0, err;
int first_driver, first_boot;
- __le64 mac_addr[FLASH_NUM_PORTS + 1];
u32 val;
int pci_func_id = PCI_FUNC(pdev->devfn);
- DECLARE_MAC_BUF(mac);
struct netxen_legacy_intr_set *legacy_intrp;
uint8_t revision_id;
@@ -545,6 +523,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
+ if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
+ printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
+ "will not be enabled.\n",
+ NX_P3_A0, NX_P3_B1);
+ return -ENODEV;
+ }
+
if ((err = pci_enable_device(pdev)))
return err;
@@ -898,34 +883,14 @@ request_msi:
goto err_out_disable_msi;
init_timer(&adapter->watchdog_timer);
- adapter->ahw.linkup = 0;
adapter->watchdog_timer.function = &netxen_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
- if (netxen_is_flash_supported(adapter) == 0 &&
- netxen_get_flash_mac_addr(adapter, mac_addr) == 0) {
- unsigned char *p;
-
- p = (unsigned char *)&mac_addr[adapter->portnum];
- netdev->dev_addr[0] = *(p + 5);
- netdev->dev_addr[1] = *(p + 4);
- netdev->dev_addr[2] = *(p + 3);
- netdev->dev_addr[3] = *(p + 2);
- netdev->dev_addr[4] = *(p + 1);
- netdev->dev_addr[5] = *(p + 0);
-
- memcpy(netdev->perm_addr, netdev->dev_addr,
- netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- printk(KERN_ERR "%s: Bad MAC address %s.\n",
- netxen_nic_driver_name,
- print_mac(mac, netdev->dev_addr));
- } else {
- adapter->macaddr_set(adapter, netdev->dev_addr);
- }
- }
+ err = netxen_read_mac_addr(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -1000,6 +965,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netxen_free_hw_resources(adapter);
+ netxen_release_rx_buffers(adapter);
netxen_free_sw_resources(adapter);
}
@@ -1069,6 +1035,15 @@ static int netxen_nic_open(struct net_device *netdev)
goto err_out_free_sw;
}
+ if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
+ (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
+ printk(KERN_ERR "%s: Firmware interrupt scheme is "
+ "incompatible with driver\n",
+ netdev->name);
+ adapter->driver_mismatch = 1;
+ goto err_out_free_hw;
+ }
+
if (adapter->fw_major < 4) {
adapter->crb_addr_cmd_producer =
crb_cmd_producer[adapter->portnum];
@@ -1094,7 +1069,7 @@ static int netxen_nic_open(struct net_device *netdev)
flags, netdev->name, adapter);
if (err) {
printk(KERN_ERR "request_irq failed with: %d\n", err);
- goto err_out_free_hw;
+ goto err_out_free_rxbuf;
}
adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
@@ -1116,6 +1091,7 @@ static int netxen_nic_open(struct net_device *netdev)
if (adapter->set_mtu)
adapter->set_mtu(adapter, netdev->mtu);
+ adapter->ahw.linkup = 0;
mod_timer(&adapter->watchdog_timer, jiffies);
napi_enable(&adapter->napi);
@@ -1127,6 +1103,8 @@ static int netxen_nic_open(struct net_device *netdev)
err_out_free_irq:
free_irq(adapter->irq, adapter);
+err_out_free_rxbuf:
+ netxen_release_rx_buffers(adapter);
err_out_free_hw:
netxen_free_hw_resources(adapter);
err_out_free_sw:
@@ -1152,10 +1130,8 @@ static int netxen_nic_close(struct net_device *netdev)
netxen_release_tx_buffers(adapter);
- if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
- FLUSH_SCHEDULED_WORK();
- del_timer_sync(&adapter->watchdog_timer);
- }
+ FLUSH_SCHEDULED_WORK();
+ del_timer_sync(&adapter->watchdog_timer);
return 0;
}
@@ -1458,7 +1434,8 @@ void netxen_watchdog_task(struct work_struct *work)
netxen_nic_handle_phy_intr(adapter);
- mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+ if (netif_running(adapter->netdev))
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
static void netxen_tx_timeout(struct net_device *netdev)
@@ -1518,18 +1495,9 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
return stats;
}
-static inline void
-netxen_handle_int(struct netxen_adapter *adapter)
-{
- netxen_nic_disable_int(adapter);
- napi_schedule(&adapter->napi);
-}
-
static irqreturn_t netxen_intr(int irq, void *data)
{
struct netxen_adapter *adapter = data;
- u32 our_int = 0;
-
u32 status = 0;
status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
@@ -1544,22 +1512,32 @@ static irqreturn_t netxen_intr(int irq, void *data)
if (!ISR_LEGACY_INT_TRIGGERED(status))
return IRQ_NONE;
- } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ } else {
+ unsigned long our_int = 0;
our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
+
/* not our interrupt */
- if ((our_int & (0x80 << adapter->portnum)) == 0)
+ if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
return IRQ_NONE;
- if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
- /* claim interrupt */
- adapter->pci_write_normalize(adapter,
- CRB_INT_VECTOR,
- our_int & ~((u32)(0x80 << adapter->portnum)));
- }
+ /* claim interrupt */
+ adapter->pci_write_normalize(adapter,
+ CRB_INT_VECTOR, (our_int & 0xffffffff));
}
- netxen_handle_int(adapter);
+ /* clear interrupt */
+ if (adapter->fw_major < 4)
+ netxen_nic_disable_int(adapter);
+
+ adapter->pci_write_immediate(adapter,
+ adapter->legacy_intr.tgt_status_reg,
+ 0xffffffff);
+ /* read twice to ensure write is flushed */
+ adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
+ adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
+
+ napi_schedule(&adapter->napi);
return IRQ_HANDLED;
}
@@ -1568,7 +1546,11 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
{
struct netxen_adapter *adapter = data;
- netxen_handle_int(adapter);
+ /* clear interrupt */
+ adapter->pci_write_immediate(adapter,
+ msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
+
+ napi_schedule(&adapter->napi);
return IRQ_HANDLED;
}
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 83e5ee5..b293adc 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -125,6 +125,8 @@
#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4)
#define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8)
+#define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0)
+
/*
* capabilities register, can be used to selectively enable/disable features
* for backward compatibility
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index b35d794..88f03c9 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -46,7 +46,6 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f929882..ff175e8 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -61,7 +61,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/list.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 6531ff5..5d86281 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index a3e3895..0f6f974 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2792,7 +2792,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
pkt_size, PCI_DMA_FROMDEVICE);
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
- pci_unmap_single(pdev, addr, pkt_size,
+ pci_unmap_single(pdev, addr, tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 25e62cf..1c370e6 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -20,7 +20,6 @@
* the file called "COPYING".
*/
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index 889f987..a85efcf 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -510,7 +510,7 @@ static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
chg->path.para.p_type = SMT_P320B ;
chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
chg->path.mib_index = SBAPATHINDEX ;
- chg->path.path_pad = (u_short)NULL ;
+ chg->path.path_pad = 0;
chg->path.path_index = PRIMARY_RING ;
/* set P320F */
@@ -606,7 +606,7 @@ static void ess_send_alc_req(struct s_smc *smc)
req->path.para.p_type = SMT_P320B ;
req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
req->path.mib_index = SBAPATHINDEX ;
- req->path.path_pad = (u_short)NULL ;
+ req->path.path_pad = 0;
req->path.path_index = PRIMARY_RING ;
/* set P0017 */
@@ -636,7 +636,7 @@ static void ess_send_alc_req(struct s_smc *smc)
/* set P19 */
req->a_addr.para.p_type = SMT_P0019 ;
req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
- req->a_addr.sba_pad = (u_short)NULL ;
+ req->a_addr.sba_pad = 0;
req->a_addr.alloc_addr = null_addr ;
/* set P1A */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7d29edc..e24b25c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -24,7 +24,6 @@
#include <linux/crc32.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
@@ -666,11 +665,16 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
if (hw->chip_id != CHIP_ID_YUKON_EC) {
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ /* select page 2 to access MAC control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
/* enable Power Down */
ctrl |= PHY_M_PC_POW_D_ENA;
gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
}
/* set IEEE compatible Power Down Mode (dev. #4.99) */
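
A toy model of the sky2_phy_power_down() fix above: on Yukon EC-U the MAC specific control register sits on PHY page 2, so the driver has to select that page before the read-modify-write and restore page 0 afterwards. The register numbers and power-down bit below are placeholders for illustration, not the real Marvell definitions, and the helpers are simplified stand-ins for the driver's gm_phy_read/gm_phy_write.

#include <stdint.h>
#include <stdio.h>

/* placeholder values, not the real Marvell register map */
#define REG_PAGE_SELECT		22	/* PHY_MARV_EXT_ADR in the driver */
#define REG_PHY_CTRL		16	/* PHY_MARV_PHY_CTRL in the driver */
#define POW_D_ENA		0x0004

static unsigned int phy_page;
static uint16_t regs[4][32];		/* [page][register] */

static void phy_write(int reg, uint16_t val)
{
	if (reg == REG_PAGE_SELECT)
		phy_page = val;
	else
		regs[phy_page][reg] = val;
}

static uint16_t phy_read(int reg)
{
	return regs[phy_page][reg];
}

int main(void)
{
	uint16_t ctrl;

	phy_write(REG_PAGE_SELECT, 2);		/* select page 2 first */
	ctrl = phy_read(REG_PHY_CTRL);
	phy_write(REG_PHY_CTRL, ctrl | POW_D_ENA);
	phy_write(REG_PAGE_SELECT, 0);		/* restore page 0 */

	printf("page 2 ctrl = 0x%04x, page 0 ctrl = 0x%04x\n",
	       regs[2][REG_PHY_CTRL], regs[0][REG_PHY_CTRL]);
	return 0;
}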
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 2040965..24768c1 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2255,7 +2255,7 @@ static int smc_drv_remove(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
if (!res)
- platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, SMC_IO_EXTENT);
free_netdev(ndev);
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index c66dfc9..7db48f1 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -27,7 +27,6 @@
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/if_vlan.h>
-#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index d2439b8..71d2c5c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -66,8 +66,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.93"
-#define DRV_MODULE_RELDATE "May 22, 2008"
+#define DRV_MODULE_VERSION "3.94"
+#define DRV_MODULE_RELDATE "August 14, 2008"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -536,6 +536,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
switch (locknum) {
+ case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
default:
@@ -573,6 +574,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
switch (locknum) {
+ case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
default:
@@ -1018,15 +1020,43 @@ static void tg3_mdio_fini(struct tg3 *tp)
}
/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+ u32 val;
+
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= GRC_RX_CPU_DRIVER_EVENT;
+ tw32_f(GRC_RX_CPU_EVENT, val);
+
+ tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
int i;
+ unsigned int delay_cnt;
+ long time_remain;
+
+ /* If enough time has passed, no wait is necessary. */
+ time_remain = (long)(tp->last_event_jiffies + 1 +
+ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+ (long)jiffies;
+ if (time_remain < 0)
+ return;
- /* Wait for up to 2.5 milliseconds */
- for (i = 0; i < 250000; i++) {
+ /* Check if we can shorten the wait time. */
+ delay_cnt = jiffies_to_usecs(time_remain);
+ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+ delay_cnt = (delay_cnt >> 3) + 1;
+
+ for (i = 0; i < delay_cnt; i++) {
if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
break;
- udelay(10);
+ udelay(8);
}
}
@@ -1075,9 +1105,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
val = 0;
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
- val = tr32(GRC_RX_CPU_EVENT);
- val |= GRC_RX_CPU_DRIVER_EVENT;
- tw32_f(GRC_RX_CPU_EVENT, val);
+ tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
@@ -2124,6 +2152,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
(tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ mac_mode |= tp->mac_mode &
+ (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
+ if (mac_mode & MAC_MODE_APE_TX_EN)
+ mac_mode |= MAC_MODE_TDE_ENABLE;
+ }
+
tw32_f(MAC_MODE, mac_mode);
udelay(100);
@@ -5493,7 +5528,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
return;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
- if (apedata != APE_FW_STATUS_READY)
+ if (!(apedata & APE_FW_STATUS_READY))
return;
/* Wait for up to 1 millisecond for APE to service previous event. */
@@ -5760,6 +5795,8 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_stop(tp);
+ tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
/* No matching tg3_nvram_unlock() after this because
* chip reset below will undo the nvram lock.
*/
@@ -5908,12 +5945,19 @@ static int tg3_chip_reset(struct tg3 *tp)
} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
+ } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
+ if (tp->mac_mode & MAC_MODE_APE_TX_EN)
+ tp->mac_mode |= MAC_MODE_TDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode);
} else
tw32_f(MAC_MODE, 0);
udelay(40);
tg3_mdio_start(tp);
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
err = tg3_poll_fw(tp);
if (err)
return err;
@@ -5935,6 +5979,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+ tp->last_event_jiffies = jiffies;
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
}
@@ -5948,15 +5993,12 @@ static void tg3_stop_fw(struct tg3 *tp)
{
if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
- u32 val;
-
/* Wait for RX cpu to ACK the previous event. */
tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
- val = tr32(GRC_RX_CPU_EVENT);
- val |= GRC_RX_CPU_DRIVER_EVENT;
- tw32(GRC_RX_CPU_EVENT, val);
+
+ tg3_generate_fw_event(tp);
/* Wait for RX cpu to ACK this event. */
tg3_wait_for_event_ack(tp);
@@ -7406,7 +7448,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
- tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = 0;
+ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
@@ -7840,9 +7886,8 @@ static void tg3_timer(unsigned long __opaque)
* resets.
*/
if (!--tp->asf_counter) {
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
- u32 val;
-
+ if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
@@ -7850,9 +7895,8 @@ static void tg3_timer(unsigned long __opaque)
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
/* 5 seconds timeout */
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
- val = tr32(GRC_RX_CPU_EVENT);
- val |= GRC_RX_CPU_DRIVER_EVENT;
- tw32_f(GRC_RX_CPU_EVENT, val);
+
+ tg3_generate_fw_event(tp);
}
tp->asf_counter = tp->asf_multiplier;
}
@@ -8422,6 +8466,11 @@ static inline unsigned long get_stat64(tg3_stat64_t *val)
return ret;
}
+static inline u64 get_estat64(tg3_stat64_t *val)
+{
+ return ((u64)val->high << 32) | ((u64)val->low);
+}
+
static unsigned long calc_crc_errors(struct tg3 *tp)
{
struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -8450,7 +8499,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
#define ESTAT_ADD(member) \
estats->member = old_estats->member + \
- get_stat64(&hw_stats->member)
+ get_estat64(&hw_stats->member)
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
@@ -12416,6 +12465,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->misc_host_ctrl);
}
+ /* Preserve the APE MAC_MODE bits */
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ tp->mac_mode = tr32(MAC_MODE) |
+ MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = TG3_DEF_MAC_MODE;
+
/* these are limited to 10/100 only */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
(grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -13275,7 +13331,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pdev = pdev;
tp->dev = dev;
tp->pm_cap = pm_cap;
- tp->mac_mode = TG3_DEF_MAC_MODE;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
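
Standalone sketch of the shortened firmware-event wait introduced in tg3_wait_for_event_ack() above: instead of always polling for 2.5 ms, the driver derives the remaining window from last_event_jiffies and polls in roughly 8 microsecond steps. HZ and the jiffies helpers below are simplified stand-ins (HZ = 1000), not the kernel versions.

#include <stdio.h>

#define HZ			1000	/* simplified; real kernels vary */
#define FW_EVENT_TIMEOUT_USEC	2500

static unsigned int usecs_to_jiffies(unsigned int us)
{
	return (us + (1000000 / HZ) - 1) / (1000000 / HZ);	/* round up */
}

static unsigned int jiffies_to_usecs(unsigned int j)
{
	return j * (1000000 / HZ);
}

static unsigned int poll_iterations(unsigned long jiffies,
				    unsigned long last_event_jiffies)
{
	long time_remain;
	unsigned int delay_cnt;

	time_remain = (long)(last_event_jiffies + 1 +
			     usecs_to_jiffies(FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return 0;		/* enough time has passed, no wait */

	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > FW_EVENT_TIMEOUT_USEC)
		delay_cnt = FW_EVENT_TIMEOUT_USEC;
	return (delay_cnt >> 3) + 1;	/* number of ~8 us poll steps */
}

int main(void)
{
	printf("just sent:   %u polls\n", poll_iterations(100, 100));
	printf("2 ms later:  %u polls\n", poll_iterations(102, 100));
	printf("10 ms later: %u polls\n", poll_iterations(110, 100));
	return 0;
}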
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index df07842..f5b8cab 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -325,6 +325,8 @@
#define MAC_MODE_TDE_ENABLE 0x00200000
#define MAC_MODE_RDE_ENABLE 0x00400000
#define MAC_MODE_FHDE_ENABLE 0x00800000
+#define MAC_MODE_APE_RX_EN 0x08000000
+#define MAC_MODE_APE_TX_EN 0x10000000
#define MAC_STATUS 0x00000404
#define MAC_STATUS_PCS_SYNCED 0x00000001
#define MAC_STATUS_SIGNAL_DET 0x00000002
@@ -1889,6 +1891,7 @@
#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
/* APE convenience enumerations. */
+#define TG3_APE_LOCK_GRC 1
#define TG3_APE_LOCK_MEM 4
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
@@ -2429,7 +2432,10 @@ struct tg3 {
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
+ union {
unsigned long phy_crc_errors;
+ unsigned long last_event_jiffies;
+ };
u32 rx_offset;
u32 tg3_flags;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 85246ed..ec871f6 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -360,8 +360,8 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
{
unsigned long addr;
- addr = tag->buffer[8].address;
- addr |= (tag->buffer[9].address << 16) << 16;
+ addr = tag->buffer[9].address;
+ addr |= (tag->buffer[8].address << 16) << 16;
return (struct sk_buff *) addr;
}
@@ -1984,7 +1984,6 @@ static void TLan_ResetLists( struct net_device *dev )
TLanList *list;
dma_addr_t list_phys;
struct sk_buff *skb;
- void *t = NULL;
priv->txHead = 0;
priv->txTail = 0;
@@ -2022,7 +2021,8 @@ static void TLan_ResetLists( struct net_device *dev )
}
skb_reserve( skb, NET_IP_ALIGN );
- list->buffer[0].address = pci_map_single(priv->pciDev, t,
+ list->buffer[0].address = pci_map_single(priv->pciDev,
+ skb->data,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
TLan_StoreSKB(list, skb);
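
The TLan_GetSKB() fix above makes the read side match the way the skb pointer is stored: the low 32 bits live in descriptor word 9 and the high bits in word 8, and the double 16-bit shift reassembles them without ever shifting a 32-bit unsigned long by 32. A toy round-trip, with the field layout reduced to a plain array for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_tag {
	uint32_t address[10];		/* stand-in for the descriptor words */
};

static void store_skb(struct toy_tag *tag, void *skb)
{
	unsigned long addr = (unsigned long)skb;

	tag->address[9] = (uint32_t)addr;			/* low 32 bits */
	tag->address[8] = (uint32_t)((addr >> 16) >> 16);	/* high bits, 0 on 32-bit */
}

static void *get_skb(const struct toy_tag *tag)
{
	unsigned long addr;

	addr = tag->address[9];
	addr |= ((unsigned long)tag->address[8] << 16) << 16;
	return (void *)addr;
}

int main(void)
{
	struct toy_tag tag;
	int dummy;

	store_skb(&tag, &dummy);
	assert(get_skb(&tag) == (void *)&dummy);
	printf("round-trip ok: %p\n", get_skb(&tag));
	return 0;
}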
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 47d84cd..59d1673 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -119,7 +119,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
index e7bb349..13ccee6 100644
--- a/drivers/net/tokenring/lanstreamer.h
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -60,8 +60,6 @@
*
*/
-#include <linux/version.h>
-
/* MAX_INTR - the maximum number of times we can loop
* inside the interrupt function before returning
* control to the OS (maximum value is 256)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e6bbc63..6daea0c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
return mask;
}
+/* prepad is the amount to reserve at front. len is length after that.
+ * linear is a hint as to how much to copy (usually headers). */
+static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
+ gfp_t gfp)
+{
+ struct sk_buff *skb;
+ unsigned int i;
+
+ skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
+ if (skb) {
+ skb_reserve(skb, prepad);
+ skb_put(skb, len);
+ return skb;
+ }
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE)
+ return NULL;
+
+ /* Start with a normal skb, and add pages. */
+ skb = alloc_skb(prepad + linear, gfp);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, prepad);
+ skb_put(skb, linear);
+
+ len -= linear;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ f->page = alloc_page(gfp|__GFP_ZERO);
+ if (!f->page)
+ break;
+
+ f->page_offset = 0;
+ f->size = PAGE_SIZE;
+
+ skb->data_len += PAGE_SIZE;
+ skb->len += PAGE_SIZE;
+ skb->truesize += PAGE_SIZE;
+ skb_shinfo(skb)->nr_frags++;
+
+ if (len < PAGE_SIZE) {
+ len = 0;
+ break;
+ }
+ len -= PAGE_SIZE;
+ }
+
+ /* Too large, or alloc fail? */
+ if (unlikely(len)) {
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ return skb;
+}
+
/* Get packet from user space buffer */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
{
@@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
return -EINVAL;
}
- if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
+ if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
tun->dev->stats.rx_dropped++;
return -ENOMEM;
}
- if (align)
- skb_reserve(skb, align);
- if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+ if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
@@ -748,6 +806,36 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
}
+static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
+{
+ struct tun_struct *tun = file->private_data;
+
+ if (!tun)
+ return -EBADFD;
+
+ DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
+
+ strcpy(ifr->ifr_name, tun->dev->name);
+
+ ifr->ifr_flags = 0;
+
+ if (tun->flags & TUN_TUN_DEV)
+ ifr->ifr_flags |= IFF_TUN;
+ else
+ ifr->ifr_flags |= IFF_TAP;
+
+ if (tun->flags & TUN_NO_PI)
+ ifr->ifr_flags |= IFF_NO_PI;
+
+ if (tun->flags & TUN_ONE_QUEUE)
+ ifr->ifr_flags |= IFF_ONE_QUEUE;
+
+ if (tun->flags & TUN_VNET_HDR)
+ ifr->ifr_flags |= IFF_VNET_HDR;
+
+ return 0;
+}
+
/* This is like a cut-down ethtool ops, except done via tun fd so no
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
@@ -833,6 +921,15 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
switch (cmd) {
+ case TUNGETIFF:
+ ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &ifr, sizeof(ifr)))
+ return -EFAULT;
+ break;
+
case TUNSETNOCSUM:
/* Disable/Enable checksum */
if (arg)
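
A possible user-space use of the TUNGETIFF ioctl added above: after attaching to a tun device (which needs appropriate privileges), a process can read back the interface name and the flags it was created with. Error handling is minimal and the interface name "tun-demo" is just an example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tun-demo", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, TUNGETIFF, &ifr) < 0) {
		perror("TUNGETIFF");
		return 1;
	}
	printf("attached to %s, flags 0x%x (IFF_TUN=%d IFF_NO_PI=%d)\n",
	       ifr.ifr_name, ifr.ifr_flags,
	       !!(ifr.ifr_flags & IFF_TUN), !!(ifr.ifr_flags & IFF_NO_PI));

	close(fd);
	return 0;
}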
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 8549f11..734ce09 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -128,7 +128,6 @@ static const int multicast_filter_limit = 32;
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/in6.h>
-#include <linux/version.h>
#include <linux/dma-mapping.h>
#include "typhoon.h"
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 68e198b..0973b6e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -154,17 +154,6 @@ config USB_NET_AX8817X
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.
-config USB_HSO
- tristate "Option USB High Speed Mobile Devices"
- depends on USB && RFKILL
- default n
- help
- Choose this option if you have an Option HSDPA/HSUPA card.
- These cards support downlink speeds of 7.2Mbps or greater.
-
- To compile this driver as a module, choose M here: the
- module will be called hso.
-
config USB_NET_CDCETHER
tristate "CDC Ethernet support (smart devices such as cable modems)"
depends on USB_USBNET
@@ -337,5 +326,15 @@ config USB_NET_ZAURUS
really need this non-conformant variant of CDC Ethernet (or in
some cases CDC MDLM) protocol, not "g_ether".
+config USB_HSO
+ tristate "Option USB High Speed Mobile Devices"
+ depends on USB && RFKILL
+ default n
+ help
+ Choose this option if you have an Option HSDPA/HSUPA card.
+ These cards support downlink speeds of 7.2Mbps or greater.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hso.
endmenu
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 031d07b..6e42b5a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -102,8 +102,12 @@
#define MAX_RX_URBS 2
-#define get_serial_by_tty(x) \
- (x ? (struct hso_serial *)x->driver_data : NULL)
+static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty)
+{
+ if (tty)
+ return tty->driver_data;
+ return NULL;
+}
/*****************************************************************************/
/* Debugging functions */
@@ -294,24 +298,25 @@ static int hso_get_activity(struct hso_device *hso_dev);
/* #define DEBUG */
-#define dev2net(x) (x->port_data.dev_net)
-#define dev2ser(x) (x->port_data.dev_serial)
+static inline struct hso_net *dev2net(struct hso_device *hso_dev)
+{
+ return hso_dev->port_data.dev_net;
+}
+
+static inline struct hso_serial *dev2ser(struct hso_device *hso_dev)
+{
+ return hso_dev->port_data.dev_serial;
+}
/* Debugging functions */
#ifdef DEBUG
static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
unsigned int len)
{
- u8 i = 0;
+ static char name[255];
- printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len);
-
- for (i = 0; i < len; i++) {
- if (!(i % 16))
- printk("\n 0x%03x: ", i);
- printk("%02x ", (unsigned char)buf[i]);
- }
- printk("\n");
+ sprintf(name, "hso[%d:%s]", line_count, func_name);
+ print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len);
}
#define DUMP(buf_, len_) \
@@ -392,7 +397,7 @@ static const struct usb_device_id hso_ids[] = {
{default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */
{icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */
{icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */
- {default_port_device(0x0af0, 0xd033)}, /* Icon-322 */
+ {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */
{USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */
@@ -528,13 +533,12 @@ static struct hso_serial *get_serial_by_shared_int_and_type(
static struct hso_serial *get_serial_by_index(unsigned index)
{
- struct hso_serial *serial;
+ struct hso_serial *serial = NULL;
unsigned long flags;
- if (!serial_table[index])
- return NULL;
spin_lock_irqsave(&serial_table_lock, flags);
- serial = dev2ser(serial_table[index]);
+ if (serial_table[index])
+ serial = dev2ser(serial_table[index]);
spin_unlock_irqrestore(&serial_table_lock, flags);
return serial;
@@ -561,6 +565,7 @@ static int get_free_serial_index(void)
static void set_serial_by_index(unsigned index, struct hso_serial *serial)
{
unsigned long flags;
+
spin_lock_irqsave(&serial_table_lock, flags);
if (serial)
serial_table[index] = serial->parent;
@@ -569,7 +574,7 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
spin_unlock_irqrestore(&serial_table_lock, flags);
}
-/* log a meaningfull explanation of an USB status */
+/* log a meaningful explanation of an USB status */
static void log_usb_status(int status, const char *function)
{
char *explanation;
@@ -1103,8 +1108,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
/* reset the rts and dtr */
/* do the actual close */
serial->open_count--;
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
if (serial->open_count <= 0) {
- kref_put(&serial->parent->ref, hso_serial_ref_free);
serial->open_count = 0;
if (serial->tty) {
serial->tty->driver_data = NULL;
@@ -1467,7 +1472,8 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
return;
}
hso_put_activity(serial->parent);
- tty_wakeup(serial->tty);
+ if (serial->tty)
+ tty_wakeup(serial->tty);
hso_kick_transmit(serial);
D1(" ");
@@ -1538,7 +1544,8 @@ static void ctrl_callback(struct urb *urb)
clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
} else {
hso_put_activity(serial->parent);
- tty_wakeup(serial->tty);
+ if (serial->tty)
+ tty_wakeup(serial->tty);
/* response to a write command */
hso_kick_transmit(serial);
}
@@ -2606,6 +2613,7 @@ static int hso_resume(struct usb_interface *iface)
"Transmitting lingering data\n");
hso_net_start_xmit(hso_net->skb_tx_buf,
hso_net->net);
+ hso_net->skb_tx_buf = NULL;
}
result = hso_start_net_device(network_table[i]);
if (result)
@@ -2652,7 +2660,7 @@ static void hso_free_interface(struct usb_interface *interface)
hso_stop_net_device(network_table[i]);
cancel_work_sync(&network_table[i]->async_put_intf);
cancel_work_sync(&network_table[i]->async_get_intf);
- if(rfk)
+ if (rfk)
rfkill_unregister(rfk);
hso_free_net_device(network_table[i]);
}
@@ -2723,7 +2731,7 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
}
/* operations setup of the serial interface */
-static struct tty_operations hso_serial_ops = {
+static const struct tty_operations hso_serial_ops = {
.open = hso_serial_open,
.close = hso_serial_close,
.write = hso_serial_write,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index c3d119f..ca9d00c 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -46,6 +46,10 @@
#define MCS7830_VENDOR_ID 0x9710
#define MCS7830_PRODUCT_ID 0x7830
+#define MCS7730_PRODUCT_ID 0x7730
+
+#define SITECOM_VENDOR_ID 0x0DF6
+#define LN_030_PRODUCT_ID 0x0021
#define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \
ADVERTISE_100HALF | ADVERTISE_10FULL | \
@@ -442,6 +446,29 @@ static struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
+static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
+{
+ int ret;
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
+ netdev->dev_addr);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
@@ -455,6 +482,7 @@ static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
net->ethtool_ops = &mcs7830_ethtool_ops;
net->set_multicast_list = mcs7830_set_multicast;
mcs7830_set_multicast(net);
+ net->set_mac_address = mcs7830_set_mac_address;
/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -491,7 +519,16 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static const struct driver_info moschip_info = {
- .description = "MOSCHIP 7830 usb-NET adapter",
+ .description = "MOSCHIP 7830/7730 usb-NET adapter",
+ .bind = mcs7830_bind,
+ .rx_fixup = mcs7830_rx_fixup,
+ .flags = FLAG_ETHER,
+ .in = 1,
+ .out = 2,
+};
+
+static const struct driver_info sitecom_info = {
+ .description = "Sitecom LN-30 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
.flags = FLAG_ETHER,
@@ -504,6 +541,14 @@ static const struct usb_device_id products[] = {
USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
.driver_info = (unsigned long) &moschip_info,
},
+ {
+ USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID),
+ .driver_info = (unsigned long) &moschip_info,
+ },
+ {
+ USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID),
+ .driver_info = (unsigned long) &sitecom_info,
+ },
{},
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index e59255a..6596cd0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1317,7 +1317,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
break;
case SIOCDEVRESINSTATS :
- if( current->euid != 0 ) /* root only */
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
break;
@@ -1334,7 +1334,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
break;
case SIOCDEVSHWSTATE :
- if( current->euid != 0 ) /* root only */
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock( &nl->lock );
@@ -1355,7 +1355,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
#ifdef CONFIG_SBNI_MULTILINE
case SIOCDEVENSLAVE :
- if( current->euid != 0 ) /* root only */
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
@@ -1370,7 +1370,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
return enslave( dev, slave_dev );
case SIOCDEVEMANSIPATE :
- if( current->euid != 0 ) /* root only */
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
return emancipate( dev );
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 6f9aa16..fa14255 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -337,7 +337,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
#endif
- NS8390p_init(dev, 0);
+ NS8390_init(dev, 0);
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 2028866..0676c6d 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -40,7 +40,6 @@
*
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
@@ -252,7 +251,7 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
return;
pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb(bf->skb);
+ dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
}
@@ -467,6 +466,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
+ spin_lock_init(&sc->block);
/* Set private data */
pci_set_drvdata(pdev, hw);
@@ -587,7 +587,6 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
ath5k_stop_hw(sc);
free_irq(pdev->irq, sc);
- pci_disable_msi(pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -616,12 +615,10 @@ ath5k_pci_resume(struct pci_dev *pdev)
*/
pci_write_config_byte(pdev, 0x41, 0);
- pci_enable_msi(pdev);
-
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
- goto err_msi;
+ goto err_no_irq;
}
err = ath5k_init(sc);
@@ -642,8 +639,7 @@ ath5k_pci_resume(struct pci_dev *pdev)
return 0;
err_irq:
free_irq(pdev->irq, sc);
-err_msi:
- pci_disable_msi(pdev);
+err_no_irq:
pci_disable_device(pdev);
return err;
}
@@ -2184,8 +2180,11 @@ ath5k_beacon_config(struct ath5k_softc *sc)
sc->imask |= AR5K_INT_SWBA;
- if (ath5k_hw_hasveol(ah))
+ if (ath5k_hw_hasveol(ah)) {
+ spin_lock(&sc->block);
ath5k_beacon_send(sc);
+ spin_unlock(&sc->block);
+ }
}
/* TODO else AP */
@@ -2408,7 +2407,9 @@ ath5k_intr(int irq, void *dev_id)
TSF_TO_TU(tsf),
(unsigned long long) tsf);
} else {
+ spin_lock(&sc->block);
ath5k_beacon_send(sc);
+ spin_unlock(&sc->block);
}
}
if (status & AR5K_INT_RXEOL) {
@@ -2750,6 +2751,11 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
ret = -EOPNOTSUPP;
goto end;
}
+
+ /* Set to a reasonable value. Note that this will
+ * be set to mac80211's value at ath5k_config(). */
+ sc->bintval = 1000;
+
ret = 0;
end:
mutex_unlock(&sc->lock);
@@ -2794,9 +2800,6 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ath5k_hw *ah = sc->ah;
int ret;
- /* Set to a reasonable value. Note that this will
- * be set to mac80211's value at ath5k_config(). */
- sc->bintval = 1000;
mutex_lock(&sc->lock);
if (sc->vif != vif) {
ret = -EIO;
@@ -3055,6 +3058,7 @@ static int
ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath5k_softc *sc = hw->priv;
+ unsigned long flags;
int ret;
ath5k_debug_dump_skb(sc, skb, "BC ", 1);
@@ -3064,12 +3068,14 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
goto end;
}
+ spin_lock_irqsave(&sc->block, flags);
ath5k_txbuf_free(sc, sc->bbuf);
sc->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, sc->bbuf);
if (ret)
sc->bbuf->skb = NULL;
- else {
+ spin_unlock_irqrestore(&sc->block, flags);
+ if (!ret) {
ath5k_beacon_config(sc);
mmiowb();
}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index d7e03e6..7ec2f37 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -172,6 +172,7 @@ struct ath5k_softc {
struct tasklet_struct txtq; /* tx intr tasklet */
struct ath5k_led tx_led; /* tx led */
+ spinlock_t block; /* protects beacon */
struct ath5k_buf *bbuf; /* beacon buffer */
unsigned int bhalq, /* SW q for outgoing beacons */
bmisscount, /* missed beacon transmits */
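
The ath5k hunks add a spinlock ("block") so the beacon buffer is never touched concurrently by process context and the interrupt path. A sketch of that locking pattern with hypothetical types (not the driver's own structures):

    #include <linux/spinlock.h>

    struct demo_softc {
        spinlock_t block;        /* protects bbuf, as in ath5k_softc */
        void *bbuf;              /* shared beacon buffer */
    };

    /* Process context: disable IRQs locally while holding the lock. */
    static void demo_update_beacon(struct demo_softc *sc, void *new_buf)
    {
        unsigned long flags;

        spin_lock_irqsave(&sc->block, flags);
        sc->bbuf = new_buf;
        spin_unlock_irqrestore(&sc->block, flags);
    }

    /* Interrupt context: IRQs are already off, plain spin_lock suffices. */
    static void demo_irq_send_beacon(struct demo_softc *sc)
    {
        spin_lock(&sc->block);
        /* ... hand sc->bbuf to the hardware ... */
        spin_unlock(&sc->block);
    }
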
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index bde162f..a17eb13 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -5017,7 +5017,11 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
- if ((abs(cur_vit_mask - bin)) < 75)
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp = abs(cur_vit_mask - bin);
+
+ if (tmp < 75)
mask_amt = 1;
else
mask_amt = 0;
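
The ath9k hunk routes the abs() result through a volatile temporary to dodge a miscompilation (gcc bug #37014). A standalone illustration of that idiom, not the driver's code:

    #include <linux/kernel.h>   /* abs() */

    /* Storing the intermediate result in a volatile temporary forces the
     * compiler to materialise it instead of re-folding the expression that
     * triggered the miscompilation. */
    static int within_spur_window(int cur_vit_mask, int bin)
    {
        volatile int tmp = abs(cur_vit_mask - bin);  /* force evaluation */

        return tmp < 75;
    }
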
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bd35bb0..bd65c48 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1304,7 +1304,7 @@ EXPORT_SYMBOL(atmel_open);
int atmel_open(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
- int i, channel;
+ int i, channel, err;
/* any scheduled timer is no longer needed and might screw things up.. */
del_timer_sync(&priv->management_timer);
@@ -1328,8 +1328,9 @@ int atmel_open(struct net_device *dev)
priv->site_survey_state = SITE_SURVEY_IDLE;
priv->station_is_associated = 0;
- if (!reset_atmel_card(dev))
- return -EAGAIN;
+ err = reset_atmel_card(dev);
+ if (err)
+ return err;
if (priv->config_reg_domain) {
priv->reg_domain = priv->config_reg_domain;
@@ -3061,12 +3062,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
}
if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
- /* Do opensystem first, then try sharedkey */
+ /* Flip back and forth between WEP auth modes until the max
+ * authentication tries has been exceeded.
+ */
if (system == WLAN_AUTH_OPEN) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
priv->exclude_unencrypted = 1;
send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
return;
+ } else if ( system == WLAN_AUTH_SHARED_KEY
+ && priv->wep_is_on) {
+ priv->CurrentAuthentTransactionSeqNum = 0x001;
+ priv->exclude_unencrypted = 0;
+ send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0);
+ return;
} else if (priv->connect_to_any_BSS) {
int bss_index;
@@ -3580,12 +3589,12 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
- return 0;
+ return -EIO;
}
if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
- return 0;
+ return -ENODEV;
}
/* now check for completion of MAC initialization through
@@ -3609,19 +3618,19 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to initialise.\n",
priv->dev->name);
- return 0;
+ return -EIO;
}
/* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
if ((mr3 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
- return 0;
+ return -EIO;
}
if ((mr1 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
- return 0;
+ return -EIO;
}
atmel_copy_to_host(priv->dev, (unsigned char *)iface,
@@ -3642,7 +3651,7 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
iface->mac_status = le16_to_cpu(iface->mac_status);
- return 1;
+ return 0;
}
/* determine type of memory and MAC address */
@@ -3693,7 +3702,7 @@ static int probe_atmel_card(struct net_device *dev)
/* Standard firmware in flash, boot it up and ask
for the Mac Address */
priv->card_type = CARD_TYPE_SPI_FLASH;
- if (atmel_wakeup_firmware(priv)) {
+ if (atmel_wakeup_firmware(priv) == 0) {
atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
/* got address, now squash it again until the network
@@ -3835,6 +3844,7 @@ static int reset_atmel_card(struct net_device *dev)
struct atmel_private *priv = netdev_priv(dev);
u8 configuration;
int old_state = priv->station_state;
+ int err = 0;
/* data to add to the firmware names, in priority order
this implemenents firmware versioning */
@@ -3868,11 +3878,12 @@ static int reset_atmel_card(struct net_device *dev)
dev->name);
strcpy(priv->firmware_id, "atmel_at76c502.bin");
}
- if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) {
+ err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev);
+ if (err != 0) {
printk(KERN_ALERT
"%s: firmware %s is missing, cannot continue.\n",
dev->name, priv->firmware_id);
- return 0;
+ return err;
}
} else {
int fw_index = 0;
@@ -3901,7 +3912,7 @@ static int reset_atmel_card(struct net_device *dev)
"%s: firmware %s is missing, cannot start.\n",
dev->name, priv->firmware_id);
priv->firmware_id[0] = '\0';
- return 0;
+ return -ENOENT;
}
}
@@ -3926,8 +3937,9 @@ static int reset_atmel_card(struct net_device *dev)
release_firmware(fw_entry);
}
- if (!atmel_wakeup_firmware(priv))
- return 0;
+ err = atmel_wakeup_firmware(priv);
+ if (err != 0)
+ return err;
/* Check the version and set the correct flag for wpa stuff,
old and new firmware is incompatible.
@@ -3968,10 +3980,9 @@ static int reset_atmel_card(struct net_device *dev)
if (!priv->radio_on_broken) {
if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
CMD_STATUS_REJECTED_RADIO_OFF) {
- printk(KERN_INFO
- "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
+ printk(KERN_INFO "%s: cannot turn the radio on.\n",
dev->name);
- return 0;
+ return -EIO;
}
}
@@ -4006,7 +4017,7 @@ static int reset_atmel_card(struct net_device *dev)
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}
- return 1;
+ return 0;
}
static void atmel_send_command(struct atmel_private *priv, int command,
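
The atmel changes switch several helpers from a 1-on-success / 0-on-failure convention to the usual 0 / negative-errno one, so callers can propagate the exact error. A minimal sketch of that convention with hypothetical helpers:

    #include <linux/errno.h>

    static int demo_wakeup_firmware(int hw_failed)
    {
        if (hw_failed)
            return -EIO;        /* specific errno instead of "0 = failed" */
        return 0;
    }

    static int demo_open(int hw_failed)
    {
        int err;

        err = demo_wakeup_firmware(hw_failed);
        if (err)
            return err;         /* pass the errno up unchanged */
        return 0;
    }
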
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 3bf3a86..7205a93 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -33,7 +33,6 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
-#include <linux/version.h>
#include <linux/firmware.h>
#include <linux/wireless.h>
#include <linux/workqueue.h>
@@ -4615,7 +4614,9 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
if (bus->bustype == SSB_BUSTYPE_PCI) {
pdev = bus->host_pci;
if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) ||
+ IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) ||
IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) ||
+ IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) ||
IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013))
bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST;
}
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 2541c81..1cb77db 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -34,7 +34,6 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
-#include <linux/version.h>
#include <linux/firmware.h>
#include <linux/wireless.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index c6f886e..19a401c 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -157,7 +157,6 @@ that only one external action is invoked at a time.
#include <linux/stringify.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/time.h>
#include <linux/firmware.h>
#include <linux/acpi.h>
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 36e8d2f..dcce3542 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -31,7 +31,6 @@
******************************************************************************/
#include "ipw2200.h"
-#include <linux/version.h>
#ifndef KBUILD_EXTMOD
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index d333696..705c65b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index b3931f6..3f51f36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 22bb269..e258122 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -967,7 +966,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
s = iwl4965_get_sub_band(priv, channel);
if (s >= EEPROM_TX_POWER_BANDS) {
- IWL_ERROR("Tx Power can not find channel %d ", channel);
+ IWL_ERROR("Tx Power can not find channel %d\n", channel);
return -1;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f3d139b..cbc01a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ed09e48..061ffba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 9bd6180..c72f725 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <net/mac80211.h>
struct iwl_priv; /* FIXME: remove */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index bce5383..3715575 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -63,7 +63,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <net/mac80211.h>
@@ -146,7 +145,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
{
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
- IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
+ IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
return -ENOENT;
}
return 0;
@@ -227,7 +226,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
- IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
+ IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
goto err;
}
@@ -254,7 +253,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
- IWL_ERROR("Time out reading EEPROM[%d]", addr);
+ IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
ret = -ETIMEDOUT;
goto done;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 6512834..2eb03ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <net/mac80211.h>
#include "iwl-dev.h" /* FIXME: remove */
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index cb11c4a..4eee1b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 028e305..a099c9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index e5e5846..5d64229 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -27,7 +27,6 @@
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 60a6e01..6283a3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -207,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
case WLAN_HT_CAP_MIMO_PS_DISABLED:
break;
default:
- IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
+ IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
break;
}
@@ -969,7 +969,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
return priv->hw_params.bcast_sta_id;
default:
- IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
+ IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
return priv->hw_params.bcast_sta_id;
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 4108c7c..d82823b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -493,7 +493,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
/* Alloc keep-warm buffer */
ret = iwl_kw_alloc(priv);
if (ret) {
- IWL_ERROR("Keep Warm allocation failed");
+ IWL_ERROR("Keep Warm allocation failed\n");
goto error_kw;
}
spin_lock_irqsave(&priv->lock, flags);
@@ -1463,7 +1463,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
+ IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
return;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 444847a..b775d5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -1558,7 +1557,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
- IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
+ IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
return -ENOENT;
}
@@ -1583,7 +1582,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
}
if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
- IWL_ERROR("Time out reading EEPROM[%d]", addr);
+ IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
return -ETIMEDOUT;
}
e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
@@ -2507,7 +2506,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
return priv->hw_setting.bcast_sta_id;
default:
- IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
+ IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
return priv->hw_setting.bcast_sta_id;
}
}
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 83cd85e..29be3dc 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -413,12 +413,12 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
last_addr = range->end_addr;
__skb_unlink(entry, &priv->tx_queue);
memset(&info->status, 0, sizeof(info->status));
- priv->tx_stats[skb_get_queue_mapping(skb)].len--;
entry_hdr = (struct p54_control_hdr *) entry->data;
entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
pad = entry_data->align[0];
+ priv->tx_stats[entry_data->hw_queue - 4].len--;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
if (!(payload->status & 0x01))
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -557,6 +557,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_control_allocdata *txhdr;
size_t padding, len;
u8 rate;
+ u8 cts_rate = 0x20;
current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
if (unlikely(current_queue->len > current_queue->limit))
@@ -581,28 +582,28 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1);
hdr->retry1 = hdr->retry2 = info->control.retry_limit;
- memset(txhdr->wep_key, 0x0, 16);
- txhdr->padding = 0;
- txhdr->padding2 = 0;
-
/* TODO: add support for alternate retry TX rates */
rate = ieee80211_get_tx_rate(dev, info)->hw_value;
- if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
+ if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) {
rate |= 0x10;
- if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
+ cts_rate |= 0x10;
+ }
+ if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
rate |= 0x40;
- else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
+ cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
+ } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
rate |= 0x20;
+ cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
+ }
memset(txhdr->rateset, rate, 8);
- txhdr->wep_key_present = 0;
- txhdr->wep_key_len = 0;
- txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4);
- txhdr->magic4 = 0;
- txhdr->antenna = (info->antenna_sel_tx == 0) ?
+ txhdr->key_type = 0;
+ txhdr->key_len = 0;
+ txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
+ txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
2 : info->antenna_sel_tx - 1;
txhdr->output_power = 0x7f; // HW Maximum
- txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
- 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23));
+ txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
+ 0 : cts_rate;
if (padding)
txhdr->align[0] = padding;
@@ -836,10 +837,21 @@ static int p54_start(struct ieee80211_hw *dev)
struct p54_common *priv = dev->priv;
int err;
+ if (!priv->cached_vdcf) {
+ priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+
+ priv->tx_hdr_len + sizeof(struct p54_control_hdr),
+ GFP_KERNEL);
+
+ if (!priv->cached_vdcf)
+ return -ENOMEM;
+ }
+
err = priv->open(dev);
if (!err)
priv->mode = IEEE80211_IF_TYPE_MNTR;
+ p54_init_vdcf(dev);
+
return err;
}
@@ -1019,15 +1031,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
sizeof(struct p54_tx_control_allocdata);
- priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) +
- priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL);
-
- if (!priv->cached_vdcf) {
- ieee80211_free_hw(dev);
- return NULL;
- }
-
- p54_init_vdcf(dev);
mutex_init(&priv->conf_mutex);
return dev;
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 2245fcc..8db6c0e 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -183,16 +183,16 @@ struct p54_frame_sent_hdr {
struct p54_tx_control_allocdata {
u8 rateset[8];
- u16 padding;
- u8 wep_key_present;
- u8 wep_key_len;
- u8 wep_key[16];
- __le32 frame_type;
- u32 padding2;
- __le16 magic4;
- u8 antenna;
+ u8 unalloc0[2];
+ u8 key_type;
+ u8 key_len;
+ u8 key[16];
+ u8 hw_queue;
+ u8 unalloc1[9];
+ u8 tx_antenna;
u8 output_power;
- __le32 magic5;
+ u8 cts_rate;
+ u8 unalloc2[3];
u8 align[0];
} __attribute__ ((packed));
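
The p54 header change renames fields of the packed TX descriptor once their meaning (key material, hardware queue, CTS rate) was understood; the byte layout the firmware expects is preserved. An illustrative packed descriptor, not the real p54 layout:

    #include <linux/types.h>

    /* With __attribute__((packed)) the compiler inserts no padding, so every
     * field sits at a fixed byte offset. Renaming a field (e.g. magic5 to
     * cts_rate) changes nothing on the wire as long as its size and position
     * stay the same. */
    struct demo_tx_desc {
        u8     rateset[8];   /* offsets 0..7  */
        u8     key_type;     /* offset  8     */
        u8     key_len;      /* offset  9     */
        u8     hw_queue;     /* offset 10     */
        u8     cts_rate;     /* offset 11     */
        __le16 frame_len;    /* offsets 12..13, little-endian on the wire */
    } __attribute__ ((packed));
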
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 815c095..cbaca23 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -109,7 +109,17 @@ static void p54u_rx_cb(struct urb *urb)
urb->context = skb;
skb_queue_tail(&priv->rx_queue, skb);
} else {
+ if (!priv->hw_type)
+ skb_push(skb, sizeof(struct net2280_tx_hdr));
+
+ skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
+ if (urb->transfer_buffer != skb_tail_pointer(skb)) {
+ /* this should not happen */
+ WARN_ON(1);
+ urb->transfer_buffer = skb_tail_pointer(skb);
+ }
+
skb_queue_tail(&priv->rx_queue, skb);
}
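
The p54usb hunk recycles a receive skb for the next URB by undoing the earlier header pull and emptying the buffer. A sketch of that recycle path with hypothetical sizes:

    #include <linux/skbuff.h>
    #include <linux/usb.h>

    static void demo_recycle_rx_skb(struct urb *urb, struct sk_buff *skb,
                                    unsigned int hdr_len)
    {
        skb_push(skb, hdr_len);          /* give back the stripped header */
        skb_reset_tail_pointer(skb);
        skb_trim(skb, 0);                /* empty payload, full buffer again */

        /* make sure the URB still DMAs into the start of the data area */
        if (urb->transfer_buffer != skb_tail_pointer(skb))
            urb->transfer_buffer = skb_tail_pointer(skb);
    }
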
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index a4a8c57..ff78e52 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -173,10 +173,10 @@ struct rxdone_entry_desc {
* frame transmission failed due to excessive retries.
*/
enum txdone_entry_desc_flags {
- TXDONE_UNKNOWN = 1 << 0,
- TXDONE_SUCCESS = 1 << 1,
- TXDONE_FAILURE = 1 << 2,
- TXDONE_EXCESSIVE_RETRY = 1 << 3,
+ TXDONE_UNKNOWN,
+ TXDONE_SUCCESS,
+ TXDONE_FAILURE,
+ TXDONE_EXCESSIVE_RETRY,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 8d76bb2..2050227 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -181,6 +181,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* (Only indirectly by looking at the failed TX counters
* in the register).
*/
+ txdesc.flags = 0;
if (!urb->status)
__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
else
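
The rt2x00 change turns the TXDONE_* enum members from bitmask values into plain bit indices, because the flags word is manipulated with __set_bit()/test_bit(), which take a bit number, and it zeroes the flags before use. A sketch of the distinction with demo names:

    #include <linux/bitops.h>

    /* __set_bit()/test_bit() expect a bit NUMBER (0, 1, 2, ...), not a
     * precomputed mask; (1 << n) values here would set the wrong bits. */
    enum demo_txdone_flags {
        DEMO_TXDONE_UNKNOWN,        /* bit 0 */
        DEMO_TXDONE_SUCCESS,        /* bit 1 */
        DEMO_TXDONE_FAILURE,        /* bit 2 */
    };

    static void demo_report_txdone(int urb_status)
    {
        unsigned long flags = 0;    /* like txdesc.flags = 0 above */

        if (!urb_status)
            __set_bit(DEMO_TXDONE_UNKNOWN, &flags);
        else
            __set_bit(DEMO_TXDONE_FAILURE, &flags);

        if (test_bit(DEMO_TXDONE_FAILURE, &flags))
            ; /* ... account the failed frame ... */
    }
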
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 57376fb..ca5deb6 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -40,6 +40,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Netgear */
{USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187},
+ {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B},
/* HP */
{USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
/* Sitecom */
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 8a1d93a..51e5214 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -57,6 +57,15 @@ static ssize_t devspec_show(struct device *dev,
return sprintf(buf, "%s\n", ofdev->node->full_name);
}
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct of_device *ofdev;
+
+ ofdev = to_of_device(dev);
+ return sprintf(buf, "%s\n", ofdev->node->name);
+}
+
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -71,6 +80,7 @@ static ssize_t modalias_show(struct device *dev,
struct device_attribute of_platform_device_attrs[] = {
__ATTR_RO(devspec),
+ __ATTR_RO(name),
__ATTR_RO(modalias),
__ATTR_NULL
};
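
The of/device.c hunk exposes a read-only "name" attribute by pairing a name_show() callback with __ATTR_RO(name). A sketch of the same pattern for a hypothetical attribute:

    #include <linux/device.h>

    /* __ATTR_RO(foo) expands to a read-only struct device_attribute wired
     * to foo_show(), so the callback name must match the attribute name. */
    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "%s\n", dev_name(dev));
    }

    /* such an array is then hooked up as the bus/class dev_attrs, as
     * of_platform_device_attrs is above */
    static struct device_attribute demo_attrs[] = {
        __ATTR_RO(foo),
        __ATTR_NULL
    };
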
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2450b3a..7ba78e6 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,8 +38,10 @@ void free_cpu_buffers(void)
{
int i;
- for_each_online_cpu(i)
+ for_each_online_cpu(i) {
vfree(per_cpu(cpu_buffer, i).buffer);
+ per_cpu(cpu_buffer, i).buffer = NULL;
+ }
}
int alloc_cpu_buffers(void)
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index e7fbac5..8d692a5 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -93,6 +93,8 @@ out:
void free_event_buffer(void)
{
vfree(event_buffer);
+
+ event_buffer = NULL;
}
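
Both oprofile hunks clear the pointer right after vfree() so a second pass through the free routine sees NULL instead of a dangling pointer. A minimal sketch:

    #include <linux/vmalloc.h>

    static void *demo_buffer;

    /* vfree(NULL) is a no-op, so clearing the pointer after freeing makes
     * this routine safe to call twice and lets allocation paths test
     * "if (!demo_buffer)" reliably. */
    static void demo_free_buffer(void)
    {
        vfree(demo_buffer);
        demo_buffer = NULL;
    }
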
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 93e37f0..e17ef54 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
{
acpi_status status;
- acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
+ acpi_handle chandle, handle;
struct pci_dev *pdev = dev;
struct pci_bus *parent;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -399,10 +399,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
* an _OSC is missing, we look for an OSHP to do the same thing.
- * To handle different BIOS behavior, we look for _OSC and OSHP
- * within the scope of the hotplug controller and its parents,
+ * To handle different BIOS behavior, we look for _OSC on a root
+ * bridge preferentially (according to PCI fw spec). Later for
+ * OSHP within the scope of the hotplug controller and its parents,
* upto the host bridge under which this controller exists.
*/
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (handle) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ dbg("Trying to get hotplug control for %s\n",
+ (char *)string.pointer);
+ status = pci_osc_control_set(handle, flags);
+ if (ACPI_SUCCESS(status))
+ goto got_one;
+ kfree(string.pointer);
+ string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
+ }
+
+ pdev = dev;
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
while (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
@@ -427,15 +442,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s \n",
(char *)string.pointer);
- status = pci_osc_control_set(handle, flags);
- if (status == AE_NOT_FOUND)
- status = acpi_run_oshp(handle);
- if (ACPI_SUCCESS(status)) {
- dbg("Gained control for hotplug HW for pci %s (%s)\n",
- pci_name(dev), (char *)string.pointer);
- kfree(string.pointer);
- return 0;
- }
+ status = acpi_run_oshp(handle);
+ if (ACPI_SUCCESS(status))
+ goto got_one;
if (acpi_root_bridge(handle))
break;
chandle = handle;
@@ -449,6 +458,11 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
kfree(string.pointer);
return -ENODEV;
+got_one:
+ dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev),
+ (char *)string.pointer);
+ kfree(string.pointer);
+ return 0;
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e3a1e7e..9e6cec6 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,7 +43,6 @@ extern int pciehp_poll_mode;
extern int pciehp_poll_time;
extern int pciehp_debug;
extern int pciehp_force;
-extern int pciehp_slot_with_bus;
extern struct workqueue_struct *pciehp_wq;
#define dbg(format, arg...) \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3677495..4fd5355 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -41,7 +41,6 @@ int pciehp_debug;
int pciehp_poll_mode;
int pciehp_poll_time;
int pciehp_force;
-int pciehp_slot_with_bus;
struct workqueue_struct *pciehp_wq;
#define DRIVER_VERSION "0.4"
@@ -56,12 +55,10 @@ module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
module_param(pciehp_force, bool, 0644);
-module_param(pciehp_slot_with_bus, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
-MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name");
#define PCIE_MODULE_NAME "pciehp"
@@ -194,6 +191,7 @@ static int init_slots(struct controller *ctrl)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
+ int len, dup = 1;
int retval = -ENOMEM;
list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -220,15 +218,24 @@ static int init_slots(struct controller *ctrl)
dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset);
+duplicate_name:
retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate,
slot->device);
if (retval) {
+ /*
+ * If slot N already exists, we'll try to create
+ * slot N-1, N-2 ... N-M, until we overflow.
+ */
+ if (retval == -EEXIST) {
+ len = snprintf(slot->name, SLOT_NAME_SIZE,
+ "%d-%d", slot->number, dup++);
+ if (len < SLOT_NAME_SIZE)
+ goto duplicate_name;
+ else
+ err("duplicate slot name overflow\n");
+ }
err("pci_hp_register failed with error %d\n", retval);
- if (retval == -EEXIST)
- err("Failed to register slot because of name "
- "collision. Try \'pciehp_slot_with_bus\' "
- "module option.\n");
goto error_info;
}
/* create additional sysfs entries */
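
pciehp (and shpchp below) now retries registration with a "<slot>-<n>" suffix when the name collides, bounding the retry by snprintf's return value. A standalone sketch of the retry loop, with demo_register() standing in for pci_hp_register():

    #include <linux/kernel.h>
    #include <linux/errno.h>

    #define DEMO_NAME_SIZE 10

    static int demo_register_slot(char *name, int number,
                                  int (*demo_register)(const char *))
    {
        int dup = 1, len, ret;

        snprintf(name, DEMO_NAME_SIZE, "%d", number);
        while ((ret = demo_register(name)) == -EEXIST) {
            /* on a collision, append "-1", "-2", ... and retry */
            len = snprintf(name, DEMO_NAME_SIZE, "%d-%d", number, dup++);
            if (len >= DEMO_NAME_SIZE)
                return -EEXIST;      /* duplicate slot name overflow */
        }
        return ret;
    }
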
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index ad27e9e..ab31f5b 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1030,15 +1030,6 @@ static void pcie_shutdown_notification(struct controller *ctrl)
pciehp_free_irq(ctrl);
}
-static void make_slot_name(struct slot *slot)
-{
- if (pciehp_slot_with_bus)
- snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
- slot->bus, slot->number);
- else
- snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
-}
-
static int pcie_init_slot(struct controller *ctrl)
{
struct slot *slot;
@@ -1053,7 +1044,7 @@ static int pcie_init_slot(struct controller *ctrl)
slot->device = ctrl->slot_device_offset + slot->hp_slot;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot;
- make_slot_name(slot);
+ snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
list_add(&slot->slot_list, &ctrl->slot_list);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a8cbd03..cc38615 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,7 +39,6 @@
int shpchp_debug;
int shpchp_poll_mode;
int shpchp_poll_time;
-static int shpchp_slot_with_bus;
struct workqueue_struct *shpchp_wq;
#define DRIVER_VERSION "0.4"
@@ -53,11 +52,9 @@ MODULE_LICENSE("GPL");
module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
module_param(shpchp_poll_time, int, 0644);
-module_param(shpchp_slot_with_bus, bool, 0644);
MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name");
#define SHPC_MODULE_NAME "shpchp"
@@ -99,23 +96,13 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-static void make_slot_name(struct slot *slot)
-{
- if (shpchp_slot_with_bus)
- snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
- slot->bus, slot->number);
- else
- snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
- slot->number);
-}
-
static int init_slots(struct controller *ctrl)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
int retval = -ENOMEM;
- int i;
+ int i, len, dup = 1;
for (i = 0; i < ctrl->num_slots; i++) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -146,7 +133,7 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
- make_slot_name(slot);
+ snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
get_power_status(hotplug_slot, &info->power_status);
@@ -157,14 +144,23 @@ static int init_slots(struct controller *ctrl)
dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset);
+duplicate_name:
retval = pci_hp_register(slot->hotplug_slot,
ctrl->pci_dev->subordinate, slot->device);
if (retval) {
+ /*
+ * If slot N already exists, we'll try to create
+ * slot N-1, N-2 ... N-M, until we overflow.
+ */
+ if (retval == -EEXIST) {
+ len = snprintf(slot->name, SLOT_NAME_SIZE,
+ "%d-%d", slot->number, dup++);
+ if (len < SLOT_NAME_SIZE)
+ goto duplicate_name;
+ else
+ err("duplicate slot name overflow\n");
+ }
err("pci_hp_register failed with error %d\n", retval);
- if (retval == -EEXIST)
- err("Failed to register slot because of name "
- "collision. Try \'shpchp_slot_with_bus\' "
- "module option.\n");
goto error_info;
}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 30f581b..6dd7b13 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -36,12 +36,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
if (acpi_pci_disabled)
return -1;
- /* Find root host bridge */
- while (pdev->bus->self)
- pdev = pdev->bus->self;
- handle = acpi_get_pci_rootbridge_handle(
- pci_domain_nr(pdev->bus), pdev->bus->number);
-
+ handle = acpi_find_root_bridge_handle(pdev);
if (handle) {
pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
status = pci_osc_control_set(handle,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a04498d..cce2f4c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -383,6 +383,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->start = base;
if (!res->end)
res->end = limit + 0xfff;
+ printk(KERN_INFO "PCI: bridge %s io port: [%llx, %llx]\n", pci_name(dev), res->start, res->end);
}
res = child->resource[1];
@@ -394,6 +395,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
res->start = base;
res->end = limit + 0xfffff;
+ printk(KERN_INFO "PCI: bridge %s 32bit mmio: [%llx, %llx]\n", pci_name(dev), res->start, res->end);
}
res = child->resource[2];
@@ -429,6 +431,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
res->start = base;
res->end = limit + 0xfffff;
+ printk(KERN_INFO "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n", pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64)?"64":"32",res->start, res->end);
}
}
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 217814f..3b3b5f1 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -280,6 +280,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
match_pci_dev_by_id);
if (dev)
pdev = to_pci_dev(dev);
+ if (from)
+ pci_dev_put(from);
return pdev;
}
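
pci_get_dev_by_id() backs the pci_get_device()-style iterators, which drop the reference on the device passed in as 'from'; the hunk adds the missing pci_dev_put(). A sketch of a caller that relies on that behaviour:

    #include <linux/pci.h>

    /* pci_get_device() returns the next match with its refcount raised and
     * drops the reference on 'from', so this loop never leaks references,
     * which is why the helper above must call pci_dev_put(from) itself. */
    static void demo_walk_vendor(unsigned int vendor)
    {
        struct pci_dev *pdev = NULL;

        while ((pdev = pci_get_device(vendor, PCI_ANY_ID, pdev)) != NULL) {
            /* ... inspect pdev ... */
        }
        /* loop exits with pdev == NULL, nothing left to put */
    }
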
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 827c0a5..82634a2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,6 +530,36 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_assign_resources);
+static void pci_bus_dump_res(struct pci_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ struct resource *res = bus->resource[i];
+ if (!res)
+ continue;
+
+ printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n", bus->number, i, (res->flags & IORESOURCE_IO)? "io port":"mmio", res->start, res->end);
+ }
+}
+
+static void pci_bus_dump_resources(struct pci_bus *bus)
+{
+ struct pci_bus *b;
+ struct pci_dev *dev;
+
+
+ pci_bus_dump_res(bus);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ pci_bus_dump_resources(b);
+ }
+}
+
void __init
pci_assign_unassigned_resources(void)
{
@@ -545,4 +575,9 @@ pci_assign_unassigned_resources(void)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
+
+ /* dump the resource on buses */
+ list_for_each_entry(bus, &pci_root_buses, node) {
+ pci_bus_dump_resources(bus);
+ }
}
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index a8771ff..e07b5c5 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -23,12 +23,57 @@
static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
+ int ret;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0);
+ if (ret)
+ goto err3;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST");
+ if (ret)
+ goto err3;
+ ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1);
+ if (ret)
+ goto err4;
+
+ ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY");
+ if (ret)
+ goto err4;
+ ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY);
+ if (ret)
+ goto err5;
+
+ skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
return 0;
+
+err5:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
+err4:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
+err3:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
+err2:
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
+err1:
+ return ret;
}
static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
+ gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
}
static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
@@ -109,7 +154,7 @@ static void __exit palmtx_pcmcia_exit(void)
platform_device_unregister(palmtx_pcmcia_device);
}
-fs_initcall(palmtx_pcmcia_init);
+module_init(palmtx_pcmcia_init);
module_exit(palmtx_pcmcia_exit);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 90ab738..9a9755c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -561,7 +561,7 @@ config RTC_DRV_AT91SAM9_GPBR
config RTC_DRV_BFIN
tristate "Blackfin On-Chip RTC"
- depends on BLACKFIN
+ depends on BLACKFIN && !BF561
help
If you say yes here you will get support for the
Blackfin On-Chip Real Time Clock.
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index a1af4c2..34439ce 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -218,26 +218,6 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
-static int bfin_rtc_open(struct device *dev)
-{
- int ret;
-
- dev_dbg_stamp(dev);
-
- ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev);
- if (!ret)
- bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
-
- return ret;
-}
-
-static void bfin_rtc_release(struct device *dev)
-{
- dev_dbg_stamp(dev);
- bfin_rtc_reset(dev, 0);
- free_irq(IRQ_RTC, dev);
-}
-
static void bfin_rtc_int_set(u16 rtc_int)
{
bfin_write_RTC_ISTAT(rtc_int);
@@ -370,8 +350,6 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
}
static struct rtc_class_ops bfin_rtc_ops = {
- .open = bfin_rtc_open,
- .release = bfin_rtc_release,
.ioctl = bfin_rtc_ioctl,
.read_time = bfin_rtc_read_time,
.set_time = bfin_rtc_set_time,
@@ -383,29 +361,44 @@ static struct rtc_class_ops bfin_rtc_ops = {
static int __devinit bfin_rtc_probe(struct platform_device *pdev)
{
struct bfin_rtc *rtc;
+ struct device *dev = &pdev->dev;
int ret = 0;
+ unsigned long timeout;
- dev_dbg_stamp(&pdev->dev);
+ dev_dbg_stamp(dev);
+ /* Allocate memory for our RTC struct */
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
+ platform_set_drvdata(pdev, rtc);
+ device_init_wakeup(dev, 1);
- rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc->rtc_dev);
+ /* Grab the IRQ and init the hardware */
+ ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
+ if (unlikely(ret))
goto err;
- }
-
- /* see comment at top of file about stopwatch/PIE */
+ /* sometimes the bootloader touched things, but the write complete was not
+ * enabled, so let's just do a quick timeout here since the IRQ will not fire ...
+ */
+ timeout = jiffies + HZ;
+ while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
+ if (time_after(jiffies, timeout))
+ break;
+ bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- platform_set_drvdata(pdev, rtc);
-
- device_init_wakeup(&pdev->dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
+ if (unlikely(IS_ERR(rtc))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err_irq;
+ }
return 0;
+ err_irq:
+ free_irq(IRQ_RTC, dev);
err:
kfree(rtc);
return ret;
@@ -414,7 +407,10 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
static int __devexit bfin_rtc_remove(struct platform_device *pdev)
{
struct bfin_rtc *rtc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ bfin_rtc_reset(dev, 0);
+ free_irq(IRQ_RTC, dev);
rtc_device_unregister(rtc->rtc_dev);
platform_set_drvdata(pdev, NULL);
kfree(rtc);
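
The rtc-bfin probe now claims the IRQ and quiets the hardware before registering with the RTC class, and the error labels unwind in reverse order. A sketch of that probe structure for a hypothetical platform device (IRQ number and sizes are made up):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    /* allocate, then claim resources, then register with the subsystem
     * last, so a visible device never exists before its IRQ works */
    static int demo_probe(struct platform_device *pdev)
    {
        void *priv;
        int ret;

        priv = kzalloc(16, GFP_KERNEL);
        if (!priv)
            return -ENOMEM;
        platform_set_drvdata(pdev, priv);

        ret = request_irq(42 /* hypothetical IRQ */, demo_irq, IRQF_SHARED,
                          pdev->name, &pdev->dev);
        if (ret)
            goto err_free;

        /* ... register the class device here ... */
        return 0;

    err_free:
        kfree(priv);
        return ret;
    }
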
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 856cc1a..f118252 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/rtc.h>
-#include <linux/smp_lock.h>
#include "rtc-core.h"
static dev_t rtc_devt;
@@ -27,11 +26,8 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
struct rtc_device, char_dev);
const struct rtc_class_ops *ops = rtc->ops;
- lock_kernel();
- if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) {
- err = -EBUSY;
- goto out;
- }
+ if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
+ return -EBUSY;
file->private_data = rtc;
@@ -41,13 +37,11 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
rtc->irq_data = 0;
spin_unlock_irq(&rtc->irq_lock);
- goto out;
+ return 0;
}
/* something has gone wrong */
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
-out:
- unlock_kernel();
return err;
}
@@ -409,11 +403,14 @@ static long rtc_dev_ioctl(struct file *file,
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
case RTC_UIE_OFF:
+ mutex_unlock(&rtc->ops_lock);
clear_uie(rtc);
- break;
+ return 0;
case RTC_UIE_ON:
+ mutex_unlock(&rtc->ops_lock);
err = set_uie(rtc);
+ return err;
#endif
default:
err = -ENOTTY;
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 640acd2..a150418 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -173,7 +173,7 @@ static int ds1374_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
int cr, sr;
int ret = 0;
- if (client->irq < 0)
+ if (client->irq <= 0)
return -EINVAL;
mutex_lock(&ds1374->mutex);
@@ -212,7 +212,7 @@ static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
int cr;
int ret = 0;
- if (client->irq < 0)
+ if (client->irq <= 0)
return -EINVAL;
ret = ds1374_read_time(dev, &now);
@@ -381,7 +381,7 @@ static int ds1374_probe(struct i2c_client *client,
if (ret)
goto out_free;
- if (client->irq >= 0) {
+ if (client->irq > 0) {
ret = request_irq(client->irq, ds1374_irq, 0,
"ds1374", client);
if (ret) {
@@ -401,7 +401,7 @@ static int ds1374_probe(struct i2c_client *client,
return 0;
out_irq:
- if (client->irq >= 0)
+ if (client->irq > 0)
free_irq(client->irq, client);
out_free:
@@ -414,7 +414,7 @@ static int __devexit ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
- if (client->irq >= 0) {
+ if (client->irq > 0) {
mutex_lock(&ds1374->mutex);
ds1374->exiting = 1;
mutex_unlock(&ds1374->mutex);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index fbb90b1..a81adab 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -482,7 +482,7 @@ isl1208_sysfs_register(struct device *dev)
static int
isl1208_sysfs_unregister(struct device *dev)
{
- device_remove_file(dev, &dev_attr_atrim);
+ device_remove_file(dev, &dev_attr_dtrim);
device_remove_file(dev, &dev_attr_atrim);
device_remove_file(dev, &dev_attr_usr);
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 12f0310..78b2551 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -20,8 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
-
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index b35f9bf..395985b 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/device.h>
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1b6c52e..acb7801 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2333,13 +2333,11 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr;
- unsigned long flags;
int ret;
- device = dasd_device_from_cdev(cdev);
+ device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return 0;
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
ret = 0;
switch (event) {
case CIO_GONE:
@@ -2369,7 +2367,6 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
ret = 1;
break;
}
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
dasd_put_device(device);
return ret;
}
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 4bf0aa5..2476f87 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -308,7 +308,7 @@ struct dasd_psf_prssd_data {
unsigned char flags;
unsigned char reserved[4];
unsigned char suborder;
- unsigned char varies[9];
+ unsigned char varies[5];
} __attribute__ ((packed));
/*
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 29da441..bf512ac 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
+#include <linux/err.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
@@ -457,7 +458,7 @@ int dasd_eer_enable(struct dasd_device *device)
cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
SNSS_DATA_SIZE, device);
- if (!cqr)
+ if (IS_ERR(cqr))
return -ENOMEM;
cqr->startdev = device;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 01fcdd9..711b300 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -384,6 +384,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
* get minor, add to list
*/
down_write(&dcssblk_devices_sem);
+ if (dcssblk_get_device_by_name(local_buf)) {
+ up_write(&dcssblk_devices_sem);
+ rc = -EEXIST;
+ goto unload_seg;
+ }
rc = dcssblk_assign_free_minor(dev_info);
if (rc) {
up_write(&dcssblk_devices_sem);
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 687720b..be0ce22 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -109,7 +109,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
/* The current idal buffer is not correct. Allocate a new one. */
new = idal_buffer_alloc(block_size, 0);
- if (new == NULL)
+ if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 2a1af4e..cc8fd78 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -248,7 +248,7 @@ tape_std_mtsetblk(struct tape_device *device, int count)
/* Allocate a new idal buffer. */
new = idal_buffer_alloc(count, 0);
- if (new == NULL)
+ if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
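
The dasd_eer and tape hunks fix callers that compared an ERR_PTR-returning allocator against NULL; IS_ERR() is the right test. A sketch of the convention with a hypothetical allocator:

    #include <linux/err.h>
    #include <linux/errno.h>

    static char demo_obj[16];

    /* functions that encode errors as ERR_PTR(-errno) never return NULL
     * on failure, so callers must use IS_ERR()/PTR_ERR() */
    static void *demo_alloc(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);
        return demo_obj;
    }

    static int demo_caller(void)
    {
        void *obj = demo_alloc(0);

        if (IS_ERR(obj))
            return PTR_ERR(obj);    /* catches ERR_PTR values, not NULL */
        /* ... use obj ... */
        return 0;
    }
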
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 26a930e..e0ce65f 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -112,8 +112,10 @@ ccwgroup_release (struct device *dev)
gdev = to_ccwgroupdev(dev);
for (i = 0; i < gdev->count; i++) {
- dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
- put_device(&gdev->cdev[i]->dev);
+ if (gdev->cdev[i]) {
+ dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ put_device(&gdev->cdev[i]->dev);
+ }
}
kfree(gdev);
}
@@ -221,6 +223,13 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
+ gdev->creator_id = creator_id;
+ gdev->count = num_devices;
+ gdev->dev.bus = &ccwgroup_bus_type;
+ gdev->dev.parent = root;
+ gdev->dev.release = ccwgroup_release;
+ device_initialize(&gdev->dev);
+
curr_buf = buf;
for (i = 0; i < num_devices && curr_buf; i++) {
rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
@@ -258,16 +267,11 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
rc = -EINVAL;
goto error;
}
- gdev->creator_id = creator_id;
- gdev->count = num_devices;
- gdev->dev.bus = &ccwgroup_bus_type;
- gdev->dev.parent = root;
- gdev->dev.release = ccwgroup_release;
snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
gdev->cdev[0]->dev.bus_id);
- rc = device_register(&gdev->dev);
+ rc = device_add(&gdev->dev);
if (rc)
goto error;
get_device(&gdev->dev);
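
ccwgroup now fills in the struct device and calls device_initialize() before parsing the bus IDs, and only calls device_add() once everything has succeeded, so put_device() is safe on every error path. A sketch of the two-step registration with a hypothetical device:

    #include <linux/device.h>
    #include <linux/slab.h>

    static void demo_release(struct device *dev)
    {
        kfree(dev);
    }

    /* device_initialize() sets up refcounting early so error paths can rely
     * on put_device() to run the release callback; device_add() happens only
     * once the object is fully set up (device_register() is both in one). */
    static int demo_create(struct device *parent)
    {
        struct device *dev;
        int rc;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
            return -ENOMEM;

        dev->parent = parent;
        dev->release = demo_release;
        device_initialize(dev);

        /* ... per-device setup that may fail goes here ... */
        dev_set_name(dev, "demo0");

        rc = device_add(dev);
        if (rc)
            put_device(dev);        /* frees via demo_release() */
        return rc;
    }
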
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 46c021d..51489ef 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -477,7 +477,6 @@ void css_schedule_eval_all(void)
void css_wait_for_slow_path(void)
{
- flush_workqueue(ccw_device_notify_work);
flush_workqueue(slow_path_wq);
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e818d0c..2822103 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -150,7 +150,6 @@ static struct css_driver io_subchannel_driver = {
};
struct workqueue_struct *ccw_device_work;
-struct workqueue_struct *ccw_device_notify_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;
@@ -168,11 +167,6 @@ init_ccw_bus_type (void)
ccw_device_work = create_singlethread_workqueue("cio");
if (!ccw_device_work)
return -ENOMEM; /* FIXME: better errno ? */
- ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
- if (!ccw_device_notify_work) {
- ret = -ENOMEM; /* FIXME: better errno ? */
- goto out_err;
- }
slow_path_wq = create_singlethread_workqueue("kslowcrw");
if (!slow_path_wq) {
ret = -ENOMEM; /* FIXME: better errno ? */
@@ -192,8 +186,6 @@ init_ccw_bus_type (void)
out_err:
if (ccw_device_work)
destroy_workqueue(ccw_device_work);
- if (ccw_device_notify_work)
- destroy_workqueue(ccw_device_notify_work);
if (slow_path_wq)
destroy_workqueue(slow_path_wq);
return ret;
@@ -204,7 +196,6 @@ cleanup_ccw_bus_type (void)
{
css_driver_unregister(&io_subchannel_driver);
bus_unregister(&ccw_bus_type);
- destroy_workqueue(ccw_device_notify_work);
destroy_workqueue(ccw_device_work);
}
@@ -1496,11 +1487,22 @@ static void device_set_disconnected(struct ccw_device *cdev)
ccw_device_schedule_recovery();
}
+void ccw_device_set_notoper(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ CIO_TRACE_EVENT(2, "notoper");
+ CIO_TRACE_EVENT(2, sch->dev.bus_id);
+ ccw_device_set_timeout(cdev, 0);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+}
+
static int io_subchannel_sch_event(struct subchannel *sch, int slow)
{
int event, ret, disc;
unsigned long flags;
- enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
+ enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
struct ccw_device *cdev;
spin_lock_irqsave(sch->lock, flags);
@@ -1535,16 +1537,11 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
}
/* fall through */
case CIO_GONE:
- /* Prevent unwanted effects when opening lock. */
- cio_disable_subchannel(sch);
- device_set_disconnected(cdev);
/* Ask driver what to do with device. */
- action = UNREGISTER;
- spin_unlock_irqrestore(sch->lock, flags);
- ret = io_subchannel_notify(sch, event);
- spin_lock_irqsave(sch->lock, flags);
- if (ret)
- action = NONE;
+ if (io_subchannel_notify(sch, event))
+ action = DISC;
+ else
+ action = UNREGISTER;
break;
case CIO_REVALIDATE:
/* Device will be removed, so no notify necessary. */
@@ -1565,6 +1562,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
switch (action) {
case UNREGISTER:
case UNREGISTER_PROBE:
+ ccw_device_set_notoper(cdev);
/* Unregister device (will use subchannel lock). */
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
@@ -1577,6 +1575,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
case REPROBE:
ccw_device_trigger_reprobe(cdev);
break;
+ case DISC:
+ device_set_disconnected(cdev);
+ break;
default:
break;
}
@@ -1828,5 +1829,4 @@ EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
-EXPORT_SYMBOL(ccw_device_notify_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 9800a83..6f5c3f2 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -72,7 +72,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
}
extern struct workqueue_struct *ccw_device_work;
-extern struct workqueue_struct *ccw_device_notify_work;
extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
@@ -120,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_notoper(struct ccw_device *cdev);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8b5fe57..550508d 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -337,26 +337,34 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
return 0;
if (!cdev->online)
return 0;
+ CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ event);
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}
-static void
-ccw_device_oper_notify(struct work_struct *work)
+static void cmf_reenable_delayed(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
- int ret;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
- ret = ccw_device_notify(cdev, CIO_OPER);
- if (ret) {
+ cmf_reenable(cdev);
+}
+
+static void ccw_device_oper_notify(struct ccw_device *cdev)
+{
+ if (ccw_device_notify(cdev, CIO_OPER)) {
/* Reenable channel measurements, if needed. */
- cmf_reenable(cdev);
- wake_up(&cdev->private->wait_q);
- } else
- /* Driver doesn't want device back. */
- ccw_device_do_unreg_rereg(work);
+ PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
+ return;
+ }
+ /* Driver doesn't want device back. */
+ ccw_device_set_notoper(cdev);
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg);
+ queue_work(ccw_device_work, &cdev->private->kick_work);
}
/*
@@ -386,8 +394,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
- PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
+ ccw_device_oper_notify(cdev);
}
wake_up(&cdev->private->wait_q);
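The oper-notify rework above removes the dedicated ccw_device_notify_work queue: the CIO_OPER callback now runs synchronously from ccw_device_done(), and only the slow follow-up (cmf_reenable, or the unregister/re-register path) is deferred onto ccw_device_work via the per-device kick_work item. A minimal sketch of that deferral pattern, with hypothetical names and the workqueue API of this kernel generation (PREPARE_WORK/queue_work); it is an illustration, not part of the patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_private {
	struct work_struct kick_work;
	/* ... further per-device state ... */
};

static void demo_followup(struct work_struct *work)
{
	struct demo_private *priv =
		container_of(work, struct demo_private, kick_work);

	/* slow part runs here, e.g. cmf_reenable() in the real driver */
	(void)priv;
}

/* called from the now-synchronous notify path */
static void demo_defer_followup(struct demo_private *priv,
				struct workqueue_struct *wq)
{
	PREPARE_WORK(&priv->kick_work, demo_followup);
	queue_work(wq, &priv->kick_work);
}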
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 8484b83..5a4d85b 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -61,18 +61,18 @@
/* s390dbf views */
#define QDIO_DBF_SETUP_LEN 8
-#define QDIO_DBF_SETUP_PAGES 4
+#define QDIO_DBF_SETUP_PAGES 8
#define QDIO_DBF_SETUP_NR_AREAS 1
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_TRACE_PAGES 16
+#define QDIO_DBF_TRACE_PAGES 32
#define QDIO_DBF_SETUP_LEVEL 6
#define QDIO_DBF_TRACE_LEVEL 4
#else /* !CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_TRACE_PAGES 4
+#define QDIO_DBF_TRACE_PAGES 8
#define QDIO_DBF_SETUP_LEVEL 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d156485..e6eabc8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -330,6 +330,7 @@ static int qdio_siga_output(struct qdio_q *q)
int cc;
u32 busy_bit;
u64 start_time = 0;
+ char dbf_text[15];
QDIO_DBF_TEXT5(0, trace, "sigaout");
QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
@@ -338,6 +339,9 @@ static int qdio_siga_output(struct qdio_q *q)
again:
cc = qdio_do_siga_output(q, &busy_bit);
if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
+ sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr);
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+
if (!start_time)
start_time = get_usecs();
else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
@@ -748,16 +752,18 @@ static void qdio_kick_outbound_q(struct qdio_q *q)
rc = qdio_siga_output(q);
switch (rc) {
case 0:
- /* went smooth this time, reset timestamp */
- q->u.out.timestamp = 0;
-
/* TODO: improve error handling for CC=0 case */
#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT3(0, trace, "cc2reslv");
- sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
- atomic_read(&q->u.out.busy_siga_counter));
- QDIO_DBF_TEXT3(0, trace, dbf_text);
+ if (q->u.out.timestamp) {
+ QDIO_DBF_TEXT3(0, trace, "cc2reslv");
+ sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no,
+ q->nr,
+ atomic_read(&q->u.out.busy_siga_counter));
+ QDIO_DBF_TEXT3(0, trace, dbf_text);
+ }
#endif /* CONFIG_QDIO_DEBUG */
+ /* went smooth this time, reset timestamp */
+ q->u.out.timestamp = 0;
break;
/* cc=2 and busy bit */
case (2 | QDIO_ERROR_SIGA_BUSY):
@@ -1066,14 +1072,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
- sprintf(dbf_text, "ierr%4x",
- cdev->private->schid.sch_no);
+ sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, setup, dbf_text);
qdio_int_error(cdev);
return;
case -ETIMEDOUT:
- sprintf(dbf_text, "qtoh%4x",
- cdev->private->schid.sch_no);
+ sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, setup, dbf_text);
qdio_int_error(cdev);
return;
@@ -1124,8 +1128,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr;
+ char dbf_text[15];
- QDIO_DBF_TEXT0(0, setup, "getssqd");
+ sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
@@ -1149,14 +1155,13 @@ int qdio_cleanup(struct ccw_device *cdev, int how)
char dbf_text[15];
int rc;
+ sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0, trace, dbf_text);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
rc = qdio_shutdown(cdev, how);
if (rc == 0)
rc = qdio_free(cdev);
@@ -1191,6 +1196,9 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
unsigned long flags;
char dbf_text[15];
+ sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1205,10 +1213,6 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
return 0;
}
- sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0, trace, dbf_text);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
qdio_shutdown_debug_entries(irq_ptr, cdev);
@@ -1247,7 +1251,6 @@ no_cleanup:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
- module_put(THIS_MODULE);
if (rc)
return rc;
return 0;
@@ -1263,16 +1266,14 @@ int qdio_free(struct ccw_device *cdev)
struct qdio_irq *irq_ptr;
char dbf_text[15];
+ sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
mutex_lock(&irq_ptr->setup_mutex);
-
- sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT1(0, trace, dbf_text);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1295,7 +1296,6 @@ int qdio_initialize(struct qdio_initialize *init_data)
sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_TEXT0(0, trace, dbf_text);
rc = qdio_allocate(init_data);
if (rc)
@@ -1319,7 +1319,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_TEXT0(0, trace, dbf_text);
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
@@ -1389,6 +1388,9 @@ int qdio_establish(struct qdio_initialize *init_data)
unsigned long saveflags;
int rc;
+ sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1396,13 +1398,6 @@ int qdio_establish(struct qdio_initialize *init_data)
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
- if (!try_module_get(THIS_MODULE))
- return -EINVAL;
-
- sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_TEXT0(0, trace, dbf_text);
-
mutex_lock(&irq_ptr->setup_mutex);
qdio_setup_irq(init_data);
@@ -1472,6 +1467,9 @@ int qdio_activate(struct ccw_device *cdev)
unsigned long saveflags;
char dbf_text[20];
+ sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1485,10 +1483,6 @@ int qdio_activate(struct ccw_device *cdev)
goto out;
}
- sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
- QDIO_DBF_TEXT2(0, trace, dbf_text);
-
irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
irq_ptr->ccw.flags = CCW_FLAG_SLI;
irq_ptr->ccw.count = irq_ptr->aqueue.count;
@@ -1663,7 +1657,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
- sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
+ sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no);
QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
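Several qdio_main hunks apply the same trace-first pattern: the s390dbf text is built from cdev->private->schid.sch_no, which is valid even for a bare device, and is logged before the qdio_data NULL check, so calls against a device that never completed establish still leave a trace entry. A self-contained sketch of the idea (plain C, hypothetical names, puts() standing in for the QDIO_DBF macros):

#include <stdio.h>

struct demo_dev {
	void *qdio_data;		/* NULL until establish succeeded */
	unsigned short sch_no;
};

static int demo_cleanup(struct demo_dev *d)
{
	char dbf_text[15];

	/* log first, from data that is always available */
	snprintf(dbf_text, sizeof(dbf_text), "qcln%4x", d->sch_no);
	puts(dbf_text);

	if (!d->qdio_data)
		return -1;		/* -ENODEV in the driver */
	/* ... actual cleanup ... */
	return 0;
}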
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 1bd2a20..1679e2f 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -165,7 +165,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
void **output_sbal_array = qdio_init->output_sbal_addr_array;
int i;
- sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
+ sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0, setup, dbf_text);
for_each_input_queue(irq_ptr, q, i) {
@@ -285,7 +285,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
rc = __get_ssqd_info(irq_ptr);
if (rc) {
QDIO_DBF_TEXT2(0, setup, "ssqdasig");
- sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
+ sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(0, setup, dbf_text);
sprintf(dbf_text, "rc:%d", rc);
QDIO_DBF_TEXT2(0, setup, dbf_text);
@@ -447,7 +447,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
{
char s[80];
- sprintf(s, "%s ", cdev->dev.bus_id);
+ sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no);
switch (irq_ptr->qib.qfmt) {
case QDIO_QETH_QFMT:
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 9291a77..ea7f614 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -113,7 +113,11 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
int i;
- for_each_input_queue(irq_ptr, q, i) {
+ for (i = 0; i < irq_ptr->nr_input_qs; i++) {
+ q = irq_ptr->input_qs[i];
+ /* if establish triggered an error */
+ if (!q || !q->entry.prev || !q->entry.next)
+ continue;
list_del_rcu(&q->entry);
synchronize_rcu();
}
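The tiqdio_remove_input_queues() change stops using for_each_input_queue and instead walks the array, skipping queues whose list_head still holds NULL pointers, i.e. queues that were never linked in because establish failed part-way (per the comment in the hunk). A sketch of that defensive teardown, assuming the queues come from a zeroed allocation (hypothetical names):

#include <linux/list.h>

struct demo_q {
	struct list_head entry;		/* all-zero until the queue is really added */
	int nr;
};

static void demo_remove_queues(struct demo_q **qs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct demo_q *q = qs[i];

		/* never linked in, e.g. establish bailed out early */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;
		list_del(&q->entry);	/* list_del_rcu() + synchronize_rcu() in the driver */
	}
}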
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a08b168..e10ac9a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -133,14 +133,14 @@ claw_register_debug_facility(void)
static inline void
claw_set_busy(struct net_device *dev)
{
- ((struct claw_privbk *) dev->priv)->tbusy=1;
+ ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
eieio();
}
static inline void
claw_clear_busy(struct net_device *dev)
{
- clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
+ clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
netif_wake_queue(dev);
eieio();
}
@@ -149,20 +149,20 @@ static inline int
claw_check_busy(struct net_device *dev)
{
eieio();
- return ((struct claw_privbk *) dev->priv)->tbusy;
+ return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}
static inline void
claw_setbit_busy(int nr,struct net_device *dev)
{
netif_stop_queue(dev);
- set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+ set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}
static inline void
claw_clearbit_busy(int nr,struct net_device *dev)
{
- clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+ clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
netif_wake_queue(dev);
}
@@ -171,7 +171,7 @@ claw_test_and_setbit_busy(int nr,struct net_device *dev)
{
netif_stop_queue(dev);
return test_and_set_bit(nr,
- (void *)&(((struct claw_privbk *) dev->priv)->tbusy));
+ (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}
@@ -271,6 +271,7 @@ claw_probe(struct ccwgroup_device *cgdev)
if (!get_device(&cgdev->dev))
return -ENODEV;
privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
+ cgdev->dev.driver_data = privptr;
if (privptr == NULL) {
probe_error(cgdev);
put_device(&cgdev->dev);
@@ -305,7 +306,6 @@ claw_probe(struct ccwgroup_device *cgdev)
privptr->p_env->p_priv = privptr;
cgdev->cdev[0]->handler = claw_irq_handler;
cgdev->cdev[1]->handler = claw_irq_handler;
- cgdev->dev.driver_data = privptr;
CLAW_DBF_TEXT(2, setup, "prbext 0");
return 0;
@@ -319,7 +319,7 @@ static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
int rc;
- struct claw_privbk *privptr=dev->priv;
+ struct claw_privbk *privptr = dev->ml_priv;
unsigned long saveflags;
struct chbk *p_ch;
@@ -404,7 +404,7 @@ claw_pack_skb(struct claw_privbk *privptr)
static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
- struct claw_privbk *privptr=dev->priv;
+ struct claw_privbk *privptr = dev->ml_priv;
int buff_size;
CLAW_DBF_TEXT(4, trace, "setmtu");
buff_size = privptr->p_env->write_size;
@@ -434,7 +434,7 @@ claw_open(struct net_device *dev)
struct ccwbk *p_buf;
CLAW_DBF_TEXT(4, trace, "open");
- privptr = (struct claw_privbk *)dev->priv;
+ privptr = (struct claw_privbk *)dev->ml_priv;
/* allocate and initialize CCW blocks */
if (privptr->buffs_alloc == 0) {
rc=init_ccw_bk(dev);
@@ -780,7 +780,7 @@ claw_irq_tasklet ( unsigned long data )
p_ch = (struct chbk *) data;
dev = (struct net_device *)p_ch->ndev;
CLAW_DBF_TEXT(4, trace, "IRQtask");
- privptr = (struct claw_privbk *) dev->priv;
+ privptr = (struct claw_privbk *)dev->ml_priv;
unpack_read(dev);
clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
CLAW_DBF_TEXT(4, trace, "TskletXt");
@@ -805,7 +805,7 @@ claw_release(struct net_device *dev)
if (!dev)
return 0;
- privptr = (struct claw_privbk *) dev->priv;
+ privptr = (struct claw_privbk *)dev->ml_priv;
if (!privptr)
return 0;
CLAW_DBF_TEXT(4, trace, "release");
@@ -960,7 +960,7 @@ claw_write_next ( struct chbk * p_ch )
if (p_ch->claw_state == CLAW_STOP)
return;
dev = (struct net_device *) p_ch->ndev;
- privptr = (struct claw_privbk *) dev->priv;
+ privptr = (struct claw_privbk *) dev->ml_priv;
claw_free_wrt_buf( dev );
if ((privptr->write_free_count > 0) &&
!skb_queue_empty(&p_ch->collect_queue)) {
@@ -1042,7 +1042,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
struct ccw1 temp_ccw;
struct endccw * p_end;
CLAW_DBF_TEXT(4, trace, "addreads");
- privptr = dev->priv;
+ privptr = dev->ml_priv;
p_end = privptr->p_end_ccw;
/* first CCW and last CCW contains a new set of read channel programs
@@ -1212,7 +1212,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name )
int rc=0;
CLAW_DBF_TEXT(2, setup, "findlink");
- privptr=dev->priv;
+ privptr = dev->ml_priv;
p_env=privptr->p_env;
switch (p_env->packing)
{
@@ -1264,7 +1264,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
struct chbk *ch;
CLAW_DBF_TEXT(4, trace, "hw_tx");
- privptr = (struct claw_privbk *) (dev->priv);
+ privptr = (struct claw_privbk *)(dev->ml_priv);
p_ch=(struct chbk *)&privptr->channel[WRITE];
p_env =privptr->p_env;
claw_free_wrt_buf(dev); /* Clean up free chain if posible */
@@ -1483,8 +1483,8 @@ init_ccw_bk(struct net_device *dev)
struct ccwbk*p_last_CCWB;
struct ccwbk*p_first_CCWB;
struct endccw *p_endccw=NULL;
- addr_t real_address;
- struct claw_privbk *privptr=dev->priv;
+ addr_t real_address;
+ struct claw_privbk *privptr = dev->ml_priv;
struct clawh *pClawH=NULL;
addr_t real_TIC_address;
int i,j;
@@ -1960,19 +1960,16 @@ init_ccw_bk(struct net_device *dev)
static void
probe_error( struct ccwgroup_device *cgdev)
{
- struct claw_privbk *privptr;
+ struct claw_privbk *privptr;
CLAW_DBF_TEXT(4, trace, "proberr");
- privptr=(struct claw_privbk *)cgdev->dev.driver_data;
- if (privptr!=NULL) {
+ privptr = (struct claw_privbk *) cgdev->dev.driver_data;
+ if (privptr != NULL) {
+ cgdev->dev.driver_data = NULL;
kfree(privptr->p_env);
- privptr->p_env=NULL;
- kfree(privptr->p_mtc_envelope);
- privptr->p_mtc_envelope=NULL;
- kfree(privptr);
- privptr=NULL;
- }
- return;
+ kfree(privptr->p_mtc_envelope);
+ kfree(privptr);
+ }
} /* probe_error */
/*-------------------------------------------------------------------*
@@ -2000,7 +1997,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
CLAW_DBF_TEXT(2, setup, "clw_cntl");
udelay(1000); /* Wait a ms for the control packets to
*catch up to each other */
- privptr=dev->priv;
+ privptr = dev->ml_priv;
p_env=privptr->p_env;
tdev = &privptr->channel[READ].cdev->dev;
memcpy( &temp_host_name, p_env->host_name, 8);
@@ -2278,7 +2275,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link,
struct sk_buff *skb;
CLAW_DBF_TEXT(2, setup, "sndcntl");
- privptr=dev->priv;
+ privptr = dev->ml_priv;
p_ctl=(struct clawctl *)&privptr->ctl_bk;
p_ctl->command=type;
@@ -2348,7 +2345,7 @@ static int
claw_snd_conn_req(struct net_device *dev, __u8 link)
{
int rc;
- struct claw_privbk *privptr=dev->priv;
+ struct claw_privbk *privptr = dev->ml_priv;
struct clawctl *p_ctl;
CLAW_DBF_TEXT(2, setup, "snd_conn");
@@ -2408,7 +2405,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev,
int rc;
CLAW_DBF_TEXT(2, setup, "chkresp");
- privptr = dev->priv;
+ privptr = dev->ml_priv;
p_env=privptr->p_env;
rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
p_ctl->linkid,
@@ -2446,7 +2443,7 @@ net_device_stats *claw_stats(struct net_device *dev)
struct claw_privbk *privptr;
CLAW_DBF_TEXT(4, trace, "stats");
- privptr = dev->priv;
+ privptr = dev->ml_priv;
return &privptr->stats;
} /* end of claw_stats */
@@ -2482,7 +2479,7 @@ unpack_read(struct net_device *dev )
p_last_ccw=NULL;
p_packh=NULL;
p_packd=NULL;
- privptr=dev->priv;
+ privptr = dev->ml_priv;
p_dev = &privptr->channel[READ].cdev->dev;
p_env = privptr->p_env;
@@ -2651,7 +2648,7 @@ claw_strt_read (struct net_device *dev, int lock )
int rc = 0;
__u32 parm;
unsigned long saveflags = 0;
- struct claw_privbk *privptr=dev->priv;
+ struct claw_privbk *privptr = dev->ml_priv;
struct ccwbk*p_ccwbk;
struct chbk *p_ch;
struct clawh *p_clawh;
@@ -2708,7 +2705,7 @@ claw_strt_out_IO( struct net_device *dev )
if (!dev) {
return;
}
- privptr=(struct claw_privbk *)dev->priv;
+ privptr = (struct claw_privbk *)dev->ml_priv;
p_ch=&privptr->channel[WRITE];
CLAW_DBF_TEXT(4, trace, "strt_io");
@@ -2741,7 +2738,7 @@ static void
claw_free_wrt_buf( struct net_device *dev )
{
- struct claw_privbk *privptr=(struct claw_privbk *)dev->priv;
+ struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
struct ccwbk*p_first_ccw;
struct ccwbk*p_last_ccw;
struct ccwbk*p_this_ccw;
@@ -2798,13 +2795,13 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
if (!dev)
return;
CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
- privptr = dev->priv;
+ privptr = dev->ml_priv;
if (dev->flags & IFF_RUNNING)
claw_release(dev);
if (privptr) {
privptr->channel[READ].ndev = NULL; /* say it's free */
}
- dev->priv=NULL;
+ dev->ml_priv = NULL;
#ifdef MODULE
if (free_dev) {
free_netdev(dev);
@@ -2921,7 +2918,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__);
goto out;
}
- dev->priv = privptr;
+ dev->ml_priv = privptr;
cgdev->dev.driver_data = privptr;
cgdev->cdev[READ]->dev.driver_data = privptr;
cgdev->cdev[WRITE]->dev.driver_data = privptr;
@@ -3002,7 +2999,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
ret = claw_release(ndev);
ndev->flags &=~IFF_RUNNING;
unregister_netdev(ndev);
- ndev->priv = NULL; /* cgdev data, not ndev's to free */
+ ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
claw_free_netdevice(ndev, 1);
priv->channel[READ].ndev = NULL;
priv->channel[WRITE].ndev = NULL;
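The claw changes above, and the ctcm ones that follow, are one mechanical conversion: the private block these drivers allocate themselves moves from dev->priv to dev->ml_priv, because dev->priv is being phased out in favour of the core-allocated area behind netdev_priv(), while ml_priv remains a plain pointer slot the driver owns. A sketch of that ownership split, using the alloc_netdev() API of this kernel generation and hypothetical names:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_priv {
	unsigned long tbusy;
};

static void demo_setup(struct net_device *dev)
{
	/* minimal setup hook for the sketch */
}

static struct net_device *demo_create(void)
{
	/* sizeof_priv == 0: the core-allocated area is not used */
	struct net_device *dev = alloc_netdev(0, "demo%d", demo_setup);
	struct demo_priv *priv;

	if (!dev)
		return NULL;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		free_netdev(dev);
		return NULL;
	}
	dev->ml_priv = priv;		/* driver-owned; the driver must free it */
	return dev;
}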
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 0b4e625..4277655 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -245,7 +245,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct sk_buff *skb;
int first = 1;
int i;
@@ -336,7 +336,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
@@ -357,7 +357,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
struct sk_buff *skb = ch->trans_skb;
__u16 block_len = *((__u16 *)skb->data);
@@ -459,7 +459,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
chx_rxidle(fi, event, arg);
} else {
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
fsm_newstate(fi, CTC_STATE_TXIDLE);
fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
}
@@ -496,7 +496,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
(ch->protocol == CTCM_PROTO_S390)) {
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
}
}
@@ -514,7 +514,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
__u16 buflen;
int rc;
@@ -699,7 +699,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
struct channel *ch)
{
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): %s[%d]\n",
@@ -784,7 +784,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
/*
* Special case: Got UC_RCRESET on setmode.
@@ -874,7 +874,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
if (event == CTC_EVENT_TIMER) {
if (!IS_MPCDEV(dev))
@@ -902,7 +902,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): RX %s busy, init. fail",
@@ -923,7 +923,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
struct channel *ch = arg;
struct channel *ch2;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
"%s: %s: remote disconnect - re-init ...",
@@ -954,7 +954,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
if (event == CTC_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
@@ -984,7 +984,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct sk_buff *skb;
CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
@@ -1057,7 +1057,7 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
int rd = CHANNEL_DIRECTION(ch->flags);
fsm_deltimer(&ch->timer);
@@ -1207,7 +1207,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *skb;
int first = 1;
@@ -1368,7 +1368,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *skb = ch->trans_skb;
struct sk_buff *new_skb;
@@ -1471,7 +1471,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *gptr = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
@@ -1525,7 +1525,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
int rc;
unsigned long saveflags = 0; /* avoids compiler warning */
@@ -1580,7 +1580,7 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
@@ -1639,7 +1639,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
@@ -1724,7 +1724,7 @@ static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
@@ -1740,7 +1740,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
struct channel *ach = arg;
struct net_device *dev = ach->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *wch = priv->channel[WRITE];
struct channel *rch = priv->channel[READ];
@@ -2050,7 +2050,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
int direction;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
@@ -2076,7 +2076,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
int direction;
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
@@ -2096,7 +2096,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
int restart_timer;
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(TRACE, dev, "");
@@ -2133,12 +2133,12 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
int dev_stat = fsm_getstate(fi);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
- dev->name, dev->priv, dev_stat, event);
+ dev->name, dev->ml_priv, dev_stat, event);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
@@ -2195,7 +2195,7 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 126a3eb..b11fec2 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -69,7 +69,7 @@ struct channel *channels;
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
__u16 len = *((__u16 *) pskb->data);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
@@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
*/
int ctcm_open(struct net_device *dev)
{
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
if (!IS_MPC(priv))
@@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev)
*/
int ctcm_close(struct net_device *dev)
{
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
CTCMY_DBF_DEV_NAME(SETUP, dev, "");
if (!IS_MPC(priv))
@@ -573,7 +573,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
skb_pull(skb, LL_HEADER_LENGTH + 2);
} else if (ccw_idx == 0) {
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
}
@@ -592,7 +592,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
struct channel *ch;
/* int rc = 0; */
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
ch = priv->channel[WRITE];
@@ -652,7 +652,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
struct pdu *p_header;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct th_header *header;
struct sk_buff *nskb;
@@ -867,7 +867,7 @@ done:
/* first merge version - leaving both functions separated */
static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
if (skb == NULL) {
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
@@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct sk_buff *newskb = NULL;
@@ -1025,7 +1025,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 576 || new_mtu > 65527)
return -EINVAL;
- priv = dev->priv;
+ priv = dev->ml_priv;
max_bufsize = priv->channel[READ]->max_bufsize;
if (IS_MPC(priv)) {
@@ -1050,7 +1050,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
*/
static struct net_device_stats *ctcm_stats(struct net_device *dev)
{
- return &((struct ctcm_priv *)dev->priv)->stats;
+ return &((struct ctcm_priv *)dev->ml_priv)->stats;
}
static void ctcm_free_netdevice(struct net_device *dev)
@@ -1060,7 +1060,7 @@ static void ctcm_free_netdevice(struct net_device *dev)
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s)", CTCM_FUNTAIL, dev->name);
- priv = dev->priv;
+ priv = dev->ml_priv;
if (priv) {
grp = priv->mpcg;
if (grp) {
@@ -1125,7 +1125,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
CTCM_FUNTAIL);
return NULL;
}
- dev->priv = priv;
+ dev->ml_priv = priv;
priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
dev_fsm, dev_fsm_len, GFP_KERNEL);
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index a72e0fe..8e10ee8 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -229,14 +229,14 @@ void ctcm_remove_files(struct device *dev);
*/
static inline void ctcm_clear_busy_do(struct net_device *dev)
{
- clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
+ clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
netif_wake_queue(dev);
}
static inline void ctcm_clear_busy(struct net_device *dev)
{
struct mpc_group *grp;
- grp = ((struct ctcm_priv *)dev->priv)->mpcg;
+ grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
if (!(grp && grp->in_sweep))
ctcm_clear_busy_do(dev);
@@ -246,7 +246,8 @@ static inline void ctcm_clear_busy(struct net_device *dev)
static inline int ctcm_test_and_set_busy(struct net_device *dev)
{
netif_stop_queue(dev);
- return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy));
+ return test_and_set_bit(0,
+ &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
}
extern int loglevel;
@@ -292,7 +293,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
-#define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)d->priv)
+#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
static inline gfp_t gfp_type(void)
{
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 49ae1cd..cbe4704 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -19,7 +19,6 @@
#undef DEBUGDATA
#undef DEBUGCCW
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -313,10 +312,10 @@ static struct net_device *ctcmpc_get_dev(int port_num)
CTCM_FUNTAIL, device);
return NULL;
}
- priv = dev->priv;
+ priv = dev->ml_priv;
if (priv == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
- "%s(%s): dev->priv is NULL",
+ "%s(%s): dev->ml_priv is NULL",
CTCM_FUNTAIL, device);
return NULL;
}
@@ -345,7 +344,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return 1;
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
grp->allochanfunc = callback;
@@ -417,7 +416,7 @@ void ctc_mpc_establish_connectivity(int port_num,
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
rch = priv->channel[READ];
wch = priv->channel[WRITE];
@@ -535,7 +534,7 @@ void ctc_mpc_dealloc_ch(int port_num)
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
@@ -571,7 +570,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
dev = ctcmpc_get_dev(port_num);
if (dev == NULL)
return;
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -620,7 +619,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[WRITE];
@@ -651,7 +650,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
static void ctcmpc_send_sweep_resp(struct channel *rch)
{
struct net_device *dev = rch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
int rc = 0;
struct th_sweep *header;
@@ -713,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
{
struct channel *rch = mpcginfo->ch;
struct net_device *dev = rch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[WRITE];
@@ -847,7 +846,7 @@ static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
@@ -891,7 +890,7 @@ static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
void mpc_group_ready(unsigned long adev)
{
struct net_device *dev = (struct net_device *)adev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct channel *ch = NULL;
@@ -947,7 +946,7 @@ void mpc_group_ready(unsigned long adev)
void mpc_channel_action(struct channel *ch, int direction, int action)
{
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
if (grp == NULL) {
@@ -1057,7 +1056,7 @@ done:
static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct pdu *curr_pdu;
struct mpcg_info *mpcginfo;
@@ -1255,7 +1254,7 @@ void ctcmpc_bh(unsigned long thischan)
struct channel *ch = (struct channel *)thischan;
struct sk_buff *skb;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
@@ -1377,7 +1376,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
BUG_ON(dev == NULL);
CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
grp->flow_off_called = 0;
fsm_deltimer(&grp->timer);
@@ -1483,7 +1482,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
BUG_ON(dev == NULL);
- priv = dev->priv;
+ priv = dev->ml_priv;
grp = priv->mpcg;
wch = priv->channel[WRITE];
rch = priv->channel[READ];
@@ -1521,7 +1520,7 @@ void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
if (ch) {
dev = ch->netdev;
if (dev) {
- priv = dev->priv;
+ priv = dev->ml_priv;
if (priv) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
"%s: %s: %s\n",
@@ -1569,7 +1568,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
{
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
struct xid2 *xid = mpcginfo->xid;
int rc = 0;
@@ -1866,7 +1865,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
{
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
@@ -1906,7 +1905,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
{
struct net_device *dev = arg;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = NULL;
int direction;
int send = 0;
@@ -1983,7 +1982,7 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
@@ -2045,7 +2044,7 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
struct mpcg_info *mpcginfo = arg;
struct channel *ch = mpcginfo->ch;
struct net_device *dev = ch->netdev;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
@@ -2097,7 +2096,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
__u32 new_len = 0;
struct sk_buff *skb;
struct qllc *qllcptr;
- struct ctcm_priv *priv = dev->priv;
+ struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg;
CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 6de2838..9bcfa04 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1412,7 +1412,8 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
/* How far in the ccw chain have we processed? */
if ((channel->state != LCS_CH_STATE_INIT) &&
- (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) {
+ (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (irb->scsw.cmd.cpa != 0)) {
index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
                        - channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 80971c2..bf8a75c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -689,6 +689,7 @@ struct qeth_mc_mac {
struct list_head list;
__u8 mc_addr[MAX_ADDR_LEN];
unsigned char mc_addrlen;
+ int is_vmac;
};
struct qeth_card {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bd420d1..c7ab1b8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3024,7 +3024,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
- int length = skb->len;
+ int length = skb->len - offset;
int length_here;
int element;
char *data;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b3cee03..3ac3cc1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -177,9 +177,10 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
qeth_l2_send_delgroupmac_cb);
}
-static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
+static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
{
struct qeth_mc_mac *mc;
+ int rc;
mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
@@ -188,8 +189,16 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac)
memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
mc->mc_addrlen = OSA_ADDR_LEN;
+ mc->is_vmac = vmac;
+
+ if (vmac) {
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
+ NULL);
+ } else {
+ rc = qeth_l2_send_setgroupmac(card, mac);
+ }
- if (!qeth_l2_send_setgroupmac(card, mac))
+ if (!rc)
list_add_tail(&mc->list, &card->mc_list);
else
kfree(mc);
@@ -201,7 +210,11 @@ static void qeth_l2_del_all_mc(struct qeth_card *card)
spin_lock_bh(&card->mclock);
list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
- qeth_l2_send_delgroupmac(card, mc->mc_addr);
+ if (mc->is_vmac)
+ qeth_l2_send_setdelmac(card, mc->mc_addr,
+ IPA_CMD_DELVMAC, NULL);
+ else
+ qeth_l2_send_delgroupmac(card, mc->mc_addr);
list_del(&mc->list);
kfree(mc);
}
@@ -590,7 +603,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- struct dev_mc_list *dm;
+ struct dev_addr_list *dm;
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
@@ -599,7 +612,11 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
for (dm = dev->mc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->dmi_addr);
+ qeth_l2_add_mc(card, dm->da_addr, 0);
+
+ for (dm = dev->uc_list; dm; dm = dm->next)
+ qeth_l2_add_mc(card, dm->da_addr, 1);
+
spin_unlock_bh(&card->mclock);
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
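The qeth_l2 hunks start registering secondary unicast addresses (dev->uc_list) next to the multicast ones and tag every entry with is_vmac, so qeth_l2_del_all_mc() can later issue the matching delete: DELVMAC for a unicast vmac, delete-group-MAC otherwise. A self-contained sketch of that bookkeeping (plain C, hypothetical names, printf standing in for the IPA commands):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac_entry {
	unsigned char addr[6];
	int is_vmac;			/* 1: unicast vmac, 0: group MAC */
	struct mac_entry *next;
};

static struct mac_entry *mac_list;

static int add_mac(const unsigned char *addr, int is_vmac)
{
	struct mac_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	memcpy(e->addr, addr, sizeof(e->addr));
	e->is_vmac = is_vmac;
	printf("%s %02x:%02x:...\n", is_vmac ? "SETVMAC" : "SETGMAC",
	       addr[0], addr[1]);
	e->next = mac_list;
	mac_list = e;
	return 0;
}

static void del_all_macs(void)
{
	while (mac_list) {
		struct mac_entry *e = mac_list;

		mac_list = e->next;
		printf("%s %02x:%02x:...\n",
		       e->is_vmac ? "DELVMAC" : "DELGMAC",
		       e->addr[0], e->addr[1]);
		free(e);
	}
}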
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index ac19937..210ddb6 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -136,7 +136,7 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
return -EINVAL;
if (!qeth_is_supported(card, IPA_IPV6)) {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return qeth_l3_dev_route_store(card, &card->options.route6,
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 73a86d0..9c12924 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -7,13 +7,13 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <asm/system.h>
#include <asm/sbus.h>
#include <asm/dma.h>
#include <asm/oplib.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/bpp.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fcdd73f..994da56 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
-const struct scsi_dh_devlist alua_dev_list[] = {
+static const struct scsi_dh_devlist alua_dev_list[] = {
{"HP", "MSA VOLUME" },
{"HP", "HSV101" },
{"HP", "HSV111" },
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index aa46b13..b9d23e9 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -562,7 +562,7 @@ done:
return result;
}
-const struct scsi_dh_devlist clariion_dev_list[] = {
+static const struct scsi_dh_devlist clariion_dev_list[] = {
{"DGC", "RAID"},
{"DGC", "DISK"},
{"DGC", "VRAID"},
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9c7a1f8..a6a4ef3 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev)
return ret;
}
-const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
+static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{"COMPAQ", "MSA1000 VOLUME"},
{"COMPAQ", "HSV110"},
{"HP", "HSV100"},
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b093a50..2dee69d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -376,7 +376,7 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
return SCSI_DH_NOSYS;
- h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
+ h->lun = inqp->lun[7]; /* Uses only the last byte */
}
return err;
}
@@ -386,6 +386,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
int err;
struct c9_inquiry *inqp;
+ h->lun_state = RDAC_LUN_UNOWNED;
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c9;
@@ -574,7 +575,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-const struct scsi_dh_devlist rdac_dev_list[] = {
+static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 19406ce..179ad77 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -21,7 +21,6 @@
#include <linux/i2o-dev.h>
-#include <linux/version.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ae560bc..4e0b7c8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -556,11 +556,12 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
/**
* ibmvfc_init_host - Start host initialization
* @vhost: ibmvfc host struct
+ * @relogin: is this a re-login?
*
* Return value:
* nothing
**/
-static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
{
struct ibmvfc_target *tgt;
@@ -574,6 +575,11 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
}
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ if (!relogin) {
+ memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ vhost->async_crq.cur = 0;
+ }
+
list_for_each_entry(tgt, &vhost->targets, queue)
tgt->need_login = 1;
scsi_block_requests(vhost->host);
@@ -1059,9 +1065,10 @@ static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
long timeout = wait_event_timeout(vhost->init_wait_q,
- (vhost->state == IBMVFC_ACTIVE ||
- vhost->state == IBMVFC_HOST_OFFLINE ||
- vhost->state == IBMVFC_LINK_DEAD),
+ ((vhost->state == IBMVFC_ACTIVE ||
+ vhost->state == IBMVFC_HOST_OFFLINE ||
+ vhost->state == IBMVFC_LINK_DEAD) &&
+ vhost->action == IBMVFC_HOST_ACTION_NONE),
(init_timeout * HZ));
return timeout ? 0 : -EIO;
@@ -1450,8 +1457,8 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
struct scsi_cmnd *cmnd = evt->cmnd;
- int rsp_len = 0;
- int sense_len = rsp->fcp_sense_len;
+ u32 rsp_len = 0;
+ u32 sense_len = rsp->fcp_sense_len;
if (cmnd) {
if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
@@ -1468,7 +1475,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
rsp_len = rsp->fcp_rsp_len;
if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
- if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
+ if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
ibmvfc_log_error(evt);
@@ -2077,17 +2084,18 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
- ibmvfc_log(vhost, 3, "%s event received\n", desc);
+ ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx,"
+ " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
switch (crq->event) {
case IBMVFC_AE_LINK_UP:
case IBMVFC_AE_RESUME:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
- ibmvfc_init_host(vhost);
+ ibmvfc_init_host(vhost, 1);
break;
case IBMVFC_AE_SCN_FABRIC:
vhost->events_to_log |= IBMVFC_AE_RSCN;
- ibmvfc_init_host(vhost);
+ ibmvfc_init_host(vhost, 1);
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
@@ -2133,13 +2141,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
/* Send back a response */
rc = ibmvfc_send_crq_init_complete(vhost);
if (rc == 0)
- ibmvfc_init_host(vhost);
+ ibmvfc_init_host(vhost, 0);
else
dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
break;
case IBMVFC_CRQ_INIT_COMPLETE:
dev_info(vhost->dev, "Partner initialization complete\n");
- ibmvfc_init_host(vhost);
+ ibmvfc_init_host(vhost, 0);
break;
default:
dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
@@ -3357,8 +3365,6 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
mad->buffer.va = vhost->login_buf_dma;
mad->buffer.len = sizeof(*vhost->login_buf);
- memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
- vhost->async_crq.cur = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -3601,8 +3607,9 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
}
}
- if (vhost->reinit) {
+ if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
vhost->reinit = 0;
+ scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
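Besides the relogin handling, the ibmvfc_scsi_done() hunk makes rsp_len and sense_len unsigned (u32) and copies sense data only when rsp_len <= 8, i.e. when the FCP response info actually fits in front of the sense bytes; both lengths come from the adapter response and are not trusted. A sketch of that bounds check (plain C, hypothetical names; 96 stands in for SCSI_SENSE_BUFFERSIZE):

#include <string.h>

#define DEMO_SENSE_BUFFERSIZE 96

static void demo_copy_sense(unsigned char *dst, const unsigned char *rsp_data,
			    unsigned int rsp_len, unsigned int sense_len)
{
	if (sense_len + rsp_len > DEMO_SENSE_BUFFERSIZE)
		sense_len = DEMO_SENSE_BUFFERSIZE - rsp_len;
	/* sense data follows the response info; reject oversized rsp_len */
	if (rsp_len <= 8)
		memcpy(dst, rsp_data + rsp_len, sense_len);
}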
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 4bf6e37..fb3177a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.1"
-#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
+#define IBMVFC_DRIVER_VERSION "1.0.2"
+#define IBMVFC_DRIVER_DATE "(August 14, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 6b24b9c..7b1502c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1636,7 +1636,7 @@ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
/* add io space for sg data */
- desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT *
+ desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
IBMVSCSI_CMDS_PER_LUN_DEFAULT);
return desired_io;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 7c615c7..bc9e6dd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -165,7 +165,6 @@
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/stddef.h>
-#include <linux/version.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index e0657b6..4e49fbc 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -50,7 +50,6 @@
#ifndef _IPS_H_
#define _IPS_H_
-#include <linux/version.h>
#include <linux/nmi.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 90272e6..094b47e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -27,7 +27,6 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
-#include <linux/version.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index fc7ac15..97b7633 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.03.20-rc1
+ * Version : v00.00.04.01-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -71,6 +71,10 @@ static struct pci_device_id megasas_pci_table[] = {
/* ppc IOP */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
/* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+ /* gen2*/
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
/* xscale IOP, vega */
{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
@@ -198,6 +202,9 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
*/
writel(status, &regs->outbound_intr_status);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
return 0;
}
@@ -293,6 +300,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
*/
writel(status, &regs->outbound_doorbell_clear);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_doorbell_clear);
+
return 0;
}
/**
@@ -318,6 +328,99 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
};
/**
+* The following functions are defined for gen2 (deviceid : 0x78 0x79)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_gen2 - Enables interrupts
+ * @regs: MFI register set
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ /* write ~0x00000005 (4 & 1) to the intr mask*/
+ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @regs: MFI register set
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ u32 mask = 0xFFFFFFFF;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_gen2 - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK))
+ return 1;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return 0;
+}
+/**
+ * megasas_fire_cmd_gen2 - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(dma_addr_t frame_phys_addr, u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+ .fire_cmd = megasas_fire_cmd_gen2,
+ .enable_intr = megasas_enable_intr_gen2,
+ .disable_intr = megasas_disable_intr_gen2,
+ .clear_intr = megasas_clear_intr_gen2,
+ .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+};
+
+/**
* This is the end of set of functions & definitions
* specific to ppc (deviceid : 0x60) controllers
*/
@@ -1976,7 +2079,12 @@ static int megasas_init_mfi(struct megasas_instance *instance)
/*
* Map the message registers
*/
- instance->base_addr = pci_resource_start(instance->pdev, 0);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
+ instance->base_addr = pci_resource_start(instance->pdev, 1);
+ } else {
+ instance->base_addr = pci_resource_start(instance->pdev, 0);
+ }
if (pci_request_regions(instance->pdev, "megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
@@ -1998,6 +2106,10 @@ static int megasas_init_mfi(struct megasas_instance *instance)
case PCI_DEVICE_ID_LSI_SAS1078DE:
instance->instancet = &megasas_instance_template_ppc;
break;
+ case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+ case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+ instance->instancet = &megasas_instance_template_gen2;
+ break;
case PCI_DEVICE_ID_LSI_SAS1064R:
case PCI_DEVICE_ID_DELL_PERC5:
default:
@@ -2857,6 +2969,7 @@ static void megasas_shutdown(struct pci_dev *pdev)
{
struct megasas_instance *instance = pci_get_drvdata(pdev);
megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
}
/**
@@ -3292,7 +3405,7 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
return retval;
}
-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
+static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
static ssize_t
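The "Dummy readl to force pci flush" lines added above all use the standard MMIO posted-write flush idiom: a register write may sit posted in the PCI bridge, so reading any register back from the same device guarantees the write has actually reached the controller before the CPU continues. A minimal sketch of the idiom, with a hypothetical register pointer rather than the driver's own structures:

	#include <linux/io.h>

	/* Acknowledge an interrupt and make sure the write is not left posted. */
	static inline void ack_and_flush(u32 __iomem *reg, u32 status)
	{
		writel(status, reg);	/* may be buffered by the PCI bridge */
		readl(reg);		/* read-back forces the posted write out */
	}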
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index b0c41e6..0d03324 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.03.20-rc1"
-#define MEGASAS_RELDATE "March 10, 2008"
-#define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008"
+#define MEGASAS_VERSION "00.00.04.01"
+#define MEGASAS_RELDATE "July 24, 2008"
+#define MEGASAS_EXT_VERSION "Thu July 24 11:41:51 PST 2008"
/*
* Device IDs
@@ -28,6 +28,8 @@
#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C
#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
+#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
+#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
/*
* =====================================
@@ -580,6 +582,8 @@ struct megasas_ctrl_info {
#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
/*
* register set for both 1068 and 1078 controllers
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index edf9fdb..22052bb 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -23,7 +23,6 @@
* 1.2: PowerPC (big endian) support.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index 6715ecb..9565acf 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -16,7 +16,6 @@
#ifndef _NSP32_H
#define _NSP32_H
-#include <linux/version.h>
//#define NSP32_DEBUG 9
/*
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index a221b6e..24e6cb8 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -25,7 +25,6 @@
***********************************************************************/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a319a20..45e7dcb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -993,6 +993,17 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ /*
+ * At this point all of the fcport's software states are cleared. Perform any
+ * final cleanup of firmware resources (PCBs and XCBs).
+ */
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ }
+
qla2x00_abort_fcport_cmds(fcport);
scsi_target_unblock(&rport->dev);
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6da31ba..94a720e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2237,6 +2237,7 @@ typedef struct scsi_qla_host {
#define REGISTER_FDMI_NEEDED 26
#define FCPORT_UPDATE_NEEDED 27
#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
+#define UNLOADING 29
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 601a6b2..ee89ddd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -976,8 +976,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
&ha->fw_attributes, &ha->fw_memory_size);
qla2x00_resize_request_q(ha);
ha->flags.npiv_supported = 0;
- if ((IS_QLA24XX(ha) || IS_QLA25XX(ha)) &&
- (ha->fw_attributes & BIT_2)) {
+ if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
+ IS_QLA84XX(ha)) &&
+ (ha->fw_attributes & BIT_2)) {
ha->flags.npiv_supported = 1;
if ((!ha->max_npiv_vports) ||
((ha->max_npiv_vports + 1) %
@@ -3251,6 +3252,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
{
int rval;
uint8_t status = 0;
+ scsi_qla_host_t *vha;
if (ha->flags.online) {
ha->flags.online = 0;
@@ -3265,6 +3267,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(ha, 0);
+ list_for_each_entry(vha, &ha->vp_list, vp_list)
+ qla2x00_mark_all_devices_lost(vha, 0);
} else {
if (!atomic_read(&ha->loop_down_timer))
atomic_set(&ha->loop_down_timer,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 874d802..45a3b93 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -879,11 +879,12 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
sp->request_sense_ptr += sense_len;
sp->request_sense_length -= sense_len;
if (sp->request_sense_length != 0)
- sp->ha->status_srb = sp;
+ sp->fcport->ha->status_srb = sp;
DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
- "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, cp, cp->serial_number));
+ "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no,
+ cp->device->channel, cp->device->id, cp->device->lun, cp,
+ cp->serial_number));
if (sense_len)
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
CMD_ACTUAL_SNSLEN(cp)));
@@ -1184,9 +1185,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
atomic_read(&fcport->state)));
cp->result = DID_BUS_BUSY << 16;
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(ha, fcport, 1, 1);
- }
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
break;
case CS_RESET:
@@ -1229,7 +1229,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
/* Check to see if logout occurred. */
if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
- qla2x00_mark_device_lost(ha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
break;
default:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bc90d6b..813bc77 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2686,7 +2686,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
- wake_up_process(ha->dpc_thread);
+ qla2xxx_wake_dpc(ha);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 50baf6a..93560cd 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,7 +6,6 @@
*/
#include "qla_def.h"
-#include <linux/version.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7c8af7e..26afe44 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -780,7 +780,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
sp = pha->outstanding_cmds[cnt];
if (!sp)
continue;
- if (ha->vp_idx != sp->ha->vp_idx)
+
+ if (ha->vp_idx != sp->fcport->ha->vp_idx)
continue;
match = 0;
switch (type) {
@@ -1080,9 +1081,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
sp = ha->outstanding_cmds[cnt];
if (sp) {
ha->outstanding_cmds[cnt] = NULL;
- sp->flags = 0;
sp->cmd->result = res;
- sp->cmd->host_scribble = (unsigned char *)NULL;
qla2x00_sp_compl(ha, sp);
}
}
@@ -1776,10 +1775,15 @@ probe_out:
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *ha, *vha, *temp;
ha = pci_get_drvdata(pdev);
+ list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list)
+ fc_vport_terminate(vha->fc_vport);
+
+ set_bit(UNLOADING, &ha->dpc_flags);
+
qla2x00_dfs_remove(ha);
qla84xx_put_chip(ha);
@@ -2451,8 +2455,10 @@ qla2x00_do_dpc(void *data)
void
qla2xxx_wake_dpc(scsi_qla_host_t *ha)
{
- if (ha->dpc_thread)
- wake_up_process(ha->dpc_thread);
+ struct task_struct *t = ha->dpc_thread;
+
+ if (!test_bit(UNLOADING, &ha->dpc_flags) && t)
+ wake_up_process(t);
}
/*
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 676c390..4160e4c 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k6"
+#define QLA2XXX_VERSION "8.02.01-k7"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3d36270..661f9f2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -217,6 +217,18 @@ static int sg_last_dev(void);
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
+static int sg_allow_access(struct file *filp, unsigned char *cmd)
+{
+ struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
+ struct request_queue *q = sfp->parentdp->device->request_queue;
+
+ if (sfp->parentdp->device->type == TYPE_SCANNER)
+ return 0;
+
+ return blk_verify_command(&q->cmd_filter,
+ cmd, filp->f_mode & FMODE_WRITE);
+}
+
static int
sg_open(struct inode *inode, struct file *filp)
{
@@ -689,7 +701,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EFAULT;
}
- if (read_only && !blk_verify_command(file, cmnd)) {
+ if (read_only && sg_allow_access(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
@@ -793,6 +805,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
+
SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
sdp->disk->disk_name, (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1061,7 +1074,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
- if (!blk_verify_command(filp, &opcode))
+ if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
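Note the sense of the tests in the two hunks above: sg_allow_access() follows blk_verify_command()'s convention here, returning 0 when the command passes the filter and a negative errno when it does not, so callers treat any non-zero return as a rejection. A hedged sketch of that calling convention, using a hypothetical helper name and relying on the sg_allow_access() defined earlier in the file:

	/* Hypothetical caller: reject filtered commands on read-only descriptors. */
	static int example_check_cmd(struct file *filp, unsigned char *cmd, int read_only)
	{
		if (read_only && sg_allow_access(filp, cmd))
			return -EPERM;		/* non-zero means "not allowed" */
		return 0;
	}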
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 3b4a14e..77cb342 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -449,6 +449,7 @@ config SERIAL_CLPS711X_CONSOLE
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
depends on ARM && PLAT_S3C24XX
+ select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
providing /dev/ttySAC0, 1 and 2 (note, some machines may not
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index efcd443..4a0d30b 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -28,7 +28,7 @@
#endif
#include <asm/gpio.h>
-#include <asm/mach/bfin_serial_5xx.h>
+#include <mach/bfin_serial_5xx.h>
#ifdef CONFIG_SERIAL_BFIN_DMA
#include <linux/dma-mapping.h>
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index aeeec55..e41766d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -17,11 +17,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/irq.h>
#if defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 15ee497..29b4458 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -32,11 +32,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index e24e682..a378464 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -35,11 +35,11 @@
#include <linux/serial_reg.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0f3d69b..3cb4c8a 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -32,11 +32,11 @@
#include <linux/serio.h>
#endif
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/of_device.h>
#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 964124b..75e8686 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -226,10 +226,11 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the spi bus with this function.
*
- * Returns 0 on success; non-zero on failure
+ * Returns 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
+ static DEFINE_MUTEX(spi_add_lock);
struct device *dev = spi->master->dev.parent;
int status;
@@ -246,26 +247,43 @@ int spi_add_device(struct spi_device *spi)
"%s.%u", spi->master->dev.bus_id,
spi->chip_select);
- /* drivers may modify this initial i/o setup */
+
+ /* We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration. Lock against concurrent add() calls.
+ */
+ mutex_lock(&spi_add_lock);
+
+ if (bus_find_device_by_name(&spi_bus_type, NULL, spi->dev.bus_id)
+ != NULL) {
+ dev_err(dev, "chipselect %d already in use\n",
+ spi->chip_select);
+ status = -EBUSY;
+ goto done;
+ }
+
+ /* Drivers may modify this initial i/o setup, but will
+ * normally rely on the device being setup. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
status = spi->master->setup(spi);
if (status < 0) {
dev_err(dev, "can't %s %s, status %d\n",
"setup", spi->dev.bus_id, status);
- return status;
+ goto done;
}
- /* driver core catches callers that misbehave by defining
- * devices that already exist.
- */
+ /* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0) {
+ if (status < 0)
dev_err(dev, "can't %s %s, status %d\n",
"add", spi->dev.bus_id, status);
- return status;
- }
+ else
+ dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
- dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
- return 0;
+done:
+ mutex_unlock(&spi_add_lock);
+ return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
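With the rework above, spi_add_device() can now fail with -EBUSY when another device already owns the chipselect, so callers built on spi_alloc_device() should drop their reference on failure. A hedged usage sketch; the chipselect, speed and helper name are illustrative, not taken from the patch:

	#include <linux/spi/spi.h>

	/* Hypothetical board hook adding one chip on chipselect 2 of @master. */
	static int example_add_chip(struct spi_master *master)
	{
		struct spi_device *spi;
		int status;

		spi = spi_alloc_device(master);
		if (!spi)
			return -ENOMEM;

		spi->chip_select = 2;
		spi->max_speed_hz = 1000000;
		spi->mode = SPI_MODE_0;
		/* a real caller would also set spi->modalias for driver matching */

		status = spi_add_device(spi);
		if (status < 0)			/* e.g. -EBUSY: chipselect in use */
			spi_dev_put(spi);
		return status;
	}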
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index d831a2b..87ab244 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1165,15 +1165,19 @@ EXPORT_SYMBOL(ssb_dma_translation);
int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
{
+#ifdef CONFIG_SSB_PCIHOST
int err;
+#endif
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
err = pci_set_dma_mask(dev->bus->host_pci, mask);
if (err)
return err;
err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
return err;
+#endif
case SSB_BUSTYPE_SSB:
return dma_set_mask(dev->dev, mask);
default:
@@ -1188,6 +1192,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
if (gfp_flags & GFP_DMA) {
/* Workaround: The PCI API does not support passing
* a GFP flag. */
@@ -1195,6 +1200,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
size, dma_handle, gfp_flags);
}
return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
+#endif
case SSB_BUSTYPE_SSB:
return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
default:
@@ -1210,6 +1216,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
if (gfp_flags & GFP_DMA) {
/* Workaround: The PCI API does not support passing
* a GFP flag. */
@@ -1220,6 +1227,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
pci_free_consistent(dev->bus->host_pci, size,
vaddr, dma_handle);
return;
+#endif
case SSB_BUSTYPE_SSB:
dma_free_coherent(dev->dev, size, vaddr, dma_handle);
return;
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 2e9079d..4190be6 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -33,6 +33,19 @@ config UIO_PDRV
If you don't know what to do here, say N.
+config UIO_PDRV_GENIRQ
+ tristate "Userspace I/O platform driver with generic IRQ handling"
+ help
+ Platform driver for Userspace I/O devices, including generic
+ interrupt handling code. Shared interrupts are not supported.
+
+ This kernel driver requires that the matching userspace driver
+ handles interrupts in a special way. Userspace is responsible
+ for acknowledging the hardware device if needed, and re-enabling
+ interrupts in the interrupt controller using the write() syscall.
+
+ If you don't know what to do here, say N.
+
config UIO_SMX
tristate "SMX cryptengine UIO interface"
default n
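The UIO_PDRV_GENIRQ help text above describes the userspace side of the contract: a read() on the UIO device node blocks until an interrupt arrives and returns a 32-bit event counter, and a write() of a 32-bit value re-enables the interrupt through the driver's irqcontrol hook. A hedged userspace sketch; the device node name is illustrative:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/uio0", O_RDWR);	/* assumed device node */
		uint32_t count;
		int32_t enable = 1;

		if (fd < 0)
			return 1;
		while (read(fd, &count, sizeof(count)) == sizeof(count)) {
			/* ... acknowledge the hardware via its mmap()ed registers ... */
			if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
				break;		/* re-enable the interrupt for the next event */
		}
		close(fd);
		return 0;
	}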
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index e00ce0d..8667bbd 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_UIO) += uio.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
obj-$(CONFIG_UIO_PDRV) += uio_pdrv.o
+obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
obj-$(CONFIG_UIO_SMX) += uio_smx.o
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index 5d0d2e8..0b4ef39 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -88,6 +88,8 @@ static int uio_pdrv_remove(struct platform_device *pdev)
uio_unregister_device(pdata->uioinfo);
+ kfree(pdata);
+
return 0;
}
@@ -114,5 +116,5 @@ module_exit(uio_pdrv_exit);
MODULE_AUTHOR("Uwe Kleine-Koenig");
MODULE_DESCRIPTION("Userspace I/O platform driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
new file mode 100644
index 0000000..1f82c83
--- /dev/null
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/uio/uio_pdrv_genirq.c
+ *
+ * Userspace I/O platform driver with generic IRQ handling code.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on uio_pdrv.c by Uwe Kleine-Koenig,
+ * Copyright (C) 2008 by Digi International Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/stringify.h>
+
+#define DRIVER_NAME "uio_pdrv_genirq"
+
+struct uio_pdrv_genirq_platdata {
+ struct uio_info *uioinfo;
+ spinlock_t lock;
+ unsigned long flags;
+};
+
+static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
+{
+ struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
+
+ /* Just disable the interrupt in the interrupt controller, and
+ * remember the state so we can allow user space to enable it later.
+ */
+
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+{
+ struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
+ unsigned long flags;
+
+ /* Allow user space to enable and disable the interrupt
+ * in the interrupt controller, but keep track of the
+ * state to prevent per-irq depth damage.
+ *
+ * Serialize this operation to support multiple tasks.
+ */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (irq_on) {
+ if (test_and_clear_bit(0, &priv->flags))
+ enable_irq(dev_info->irq);
+ } else {
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq(dev_info->irq);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int uio_pdrv_genirq_probe(struct platform_device *pdev)
+{
+ struct uio_info *uioinfo = pdev->dev.platform_data;
+ struct uio_pdrv_genirq_platdata *priv;
+ struct uio_mem *uiomem;
+ int ret = -EINVAL;
+ int i;
+
+ if (!uioinfo || !uioinfo->name || !uioinfo->version) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ goto bad0;
+ }
+
+ if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags) {
+ dev_err(&pdev->dev, "interrupt configuration error\n");
+ goto bad0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "unable to kmalloc\n");
+ goto bad0;
+ }
+
+ priv->uioinfo = uioinfo;
+ spin_lock_init(&priv->lock);
+ priv->flags = 0; /* interrupt is enabled to begin with */
+
+ uiomem = &uioinfo->mem[0];
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct resource *r = &pdev->resource[i];
+
+ if (r->flags != IORESOURCE_MEM)
+ continue;
+
+ if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
+ dev_warn(&pdev->dev, "device has more than "
+ __stringify(MAX_UIO_MAPS)
+ " I/O memory resources.\n");
+ break;
+ }
+
+ uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->addr = r->start;
+ uiomem->size = r->end - r->start + 1;
+ ++uiomem;
+ }
+
+ while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
+ uiomem->size = 0;
+ ++uiomem;
+ }
+
+ /* This driver requires no hardware specific kernel code to handle
+ * interrupts. Instead, the interrupt handler simply disables the
+ * interrupt in the interrupt controller. User space is responsible
+ * for performing hardware specific acknowledge and re-enabling of
+ * the interrupt in the interrupt controller.
+ *
+ * Interrupt sharing is not supported.
+ */
+
+ uioinfo->irq_flags = IRQF_DISABLED;
+ uioinfo->handler = uio_pdrv_genirq_handler;
+ uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
+ uioinfo->priv = priv;
+
+ ret = uio_register_device(&pdev->dev, priv->uioinfo);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register uio device\n");
+ goto bad1;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+ bad1:
+ kfree(priv);
+ bad0:
+ return ret;
+}
+
+static int uio_pdrv_genirq_remove(struct platform_device *pdev)
+{
+ struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev);
+
+ uio_unregister_device(priv->uioinfo);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver uio_pdrv_genirq = {
+ .probe = uio_pdrv_genirq_probe,
+ .remove = uio_pdrv_genirq_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init uio_pdrv_genirq_init(void)
+{
+ return platform_driver_register(&uio_pdrv_genirq);
+}
+
+static void __exit uio_pdrv_genirq_exit(void)
+{
+ platform_driver_unregister(&uio_pdrv_genirq);
+}
+
+module_init(uio_pdrv_genirq_init);
+module_exit(uio_pdrv_genirq_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
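A board that wants to use this new driver registers a platform device named "uio_pdrv_genirq" whose platform_data points at a struct uio_info carrying the name, version and IRQ, and whose IORESOURCE_MEM resources become the UIO memory maps. A hedged sketch of such board code; the device name, addresses and IRQ number are made up:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/uio_driver.h>

	static struct resource example_uio_resources[] = {
		{
			.start	= 0xfc000000,		/* hypothetical register block */
			.end	= 0xfc000fff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct uio_info example_uio_info = {
		.name		= "example-uio",
		.version	= "0",
		.irq		= 42,			/* hypothetical interrupt line */
		/* handler, irqcontrol and irq_flags are filled in by the driver */
	};

	static struct platform_device example_uio_device = {
		.name		= "uio_pdrv_genirq",
		.id		= -1,
		.dev		= {
			.platform_data	= &example_uio_info,
		},
		.resource	= example_uio_resources,
		.num_resources	= ARRAY_SIZE(example_uio_resources),
	};

	static int __init example_uio_register(void)
	{
		return platform_device_register(&example_uio_device);
	}
	device_initcall(example_uio_register);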
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823c..bcefbdd 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@ config USB
source "drivers/usb/core/Kconfig"
+source "drivers/usb/mon/Kconfig"
+
source "drivers/usb/host/Kconfig"
+source "drivers/usb/musb/Kconfig"
+
source "drivers/usb/class/Kconfig"
source "drivers/usb/storage/Kconfig"
source "drivers/usb/image/Kconfig"
-source "drivers/usb/mon/Kconfig"
-
comment "USB port drivers"
depends on USB
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd..9aea43a 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
offd = le32_to_cpu(buf[offb++]);
if (offd >= size) {
if (printk_ratelimit())
- usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n",
+ usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
offd, cm);
ret = -EIO;
goto cleanup;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index cb01b51..b6483dd 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -64,7 +64,6 @@
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b18..c257453 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
*/
#undef DEBUG
+#undef VERBOSE_DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -70,6 +71,9 @@
#include "cdc-acm.h"
+
+#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */
+
/*
* Version Information
*/
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex);
#define ACM_READY(acm) (acm && acm->dev && acm->used)
+#ifdef VERBOSE_DEBUG
+#define verbose 1
+#else
+#define verbose 0
+#endif
+
/*
* Functions for ACM control messages.
*/
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm)
static int acm_wb_is_avail(struct acm *acm)
{
int i, n;
+ unsigned long flags;
n = ACM_NW;
+ spin_lock_irqsave(&acm->write_lock, flags);
for (i = 0; i < ACM_NW; i++) {
n -= acm->wb[i].use;
}
+ spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
-static inline int acm_wb_is_used(struct acm *acm, int wbn)
-{
- return acm->wb[wbn].use;
-}
-
/*
* Finish write.
*/
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
unsigned long flags;
spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = 1;
wb->use = 0;
acm->transmitting--;
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
static int acm_write_start(struct acm *acm, int wbn)
{
unsigned long flags;
- struct acm_wb *wb;
+ struct acm_wb *wb = &acm->wb[wbn];
int rc;
spin_lock_irqsave(&acm->write_lock, flags);
if (!acm->dev) {
+ wb->use = 0;
spin_unlock_irqrestore(&acm->write_lock, flags);
return -ENODEV;
}
- if (!acm->write_ready) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0; /* A white lie */
- }
-
- wb = &acm->wb[wbn];
- if(acm_wb_is_avail(acm) <= 1)
- acm->write_ready = 0;
-
dbg("%s susp_count: %d", __func__, acm->susp_count);
if (acm->susp_count) {
- acm->old_ready = acm->write_ready;
acm->delayed_wb = wb;
- acm->write_ready = 0;
schedule_work(&acm->waker);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
usb_mark_last_busy(acm->dev);
- if (!acm_wb_is_used(acm, wbn)) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0;
- }
-
rc = acm_start_wb(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -488,22 +480,28 @@ urbs:
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
- struct acm *acm;
struct acm_wb *wb = urb->context;
+ struct acm *acm = wb->instance;
- dbg("Entering acm_write_bulk with status %d", urb->status);
+ if (verbose || urb->status
+ || (urb->actual_length != urb->transfer_buffer_length))
+ dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ urb->status);
- acm = wb->instance;
acm_write_done(acm, wb);
if (ACM_READY(acm))
schedule_work(&acm->work);
+ else
+ wake_up_interruptible(&acm->drain_wait);
}
static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
- dbg("Entering acm_softint.");
-
+
+ dev_vdbg(&acm->data->dev, "tx work\n");
if (!ACM_READY(acm))
return;
tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work)
static void acm_waker(struct work_struct *waker)
{
struct acm *acm = container_of(waker, struct acm, waker);
- long flags;
int rv;
rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker)
acm_start_wb(acm, acm->delayed_wb);
acm->delayed_wb = NULL;
}
- spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = acm->old_ready;
- spin_unlock_irqrestore(&acm->write_lock, flags);
usb_autopm_put_interface(acm->control);
}
@@ -595,8 +589,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
tasklet_schedule(&acm->urb_task);
done:
-err_out:
mutex_unlock(&acm->mutex);
+err_out:
mutex_unlock(&open_mutex);
return rv;
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm)
kfree(acm);
}
+static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
+
+ /* try letting the last writes drain naturally */
+ wait_event_interruptible_timeout(acm->drain_wait,
+ (ACM_NW == acm_wb_is_avail(acm))
+ || !acm->dev,
+ ACM_CLOSE_TIMEOUT * HZ);
+
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
* Do not let the line discipline know that we have a reserve,
* or it might get too enthusiastic.
*/
- return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0;
+ return acm_wb_is_avail(acm) ? acm->writesize : 0;
}
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@ skip_normal_probe:
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
INIT_WORK(&acm->waker, acm_waker);
+ init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->write_ready = 1;
acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@ skip_normal_probe:
rcv->instance = acm;
}
for (i = 0; i < num_rx_buf; i++) {
- struct acm_rb *buf = &(acm->rb[i]);
+ struct acm_rb *rb = &(acm->rb[i]);
- if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) {
+ rb->base = usb_buffer_alloc(acm->dev, readsize,
+ GFP_KERNEL, &rb->dma);
+ if (!rb->base) {
dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
goto alloc_fail7;
}
@@ -1172,6 +1177,7 @@ skip_countries:
acm_set_line(acm, &acm->line);
usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ usb_set_intfdata(data_interface, acm);
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- mutex_lock(&open_mutex);
- if (!acm || !acm->dev) {
- mutex_unlock(&open_mutex);
+ /* sibling interface is already cleaning up */
+ if (!acm)
return;
- }
+
+ mutex_lock(&open_mutex);
if (acm->country_codes){
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1356,6 +1362,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
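The close-path change above lets the last writes drain instead of killing them at once: acm_tty_close() sleeps on drain_wait until every write buffer is free again or the device is gone, bounded by ACM_CLOSE_TIMEOUT, and acm_write_bulk() wakes the waiter once the device has disappeared. A generic, hedged sketch of the same pattern with hypothetical names:

	#include <linux/sched.h>
	#include <linux/wait.h>

	struct example_port {			/* hypothetical port state */
		wait_queue_head_t drain_wait;
		int free_buffers;
		int total_buffers;
		int connected;
	};

	/* Sleep until all writes have drained or the device vanished, 15 s at most. */
	static void example_drain_on_close(struct example_port *port)
	{
		wait_event_interruptible_timeout(port->drain_wait,
				port->free_buffers == port->total_buffers ||
				!port->connected,
				15 * HZ);
	}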
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaa..1f95e7a 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@ struct acm {
struct list_head spare_read_bufs;
struct list_head filled_read_bufs;
int write_used; /* number of non-empty write buffers */
- int write_ready; /* write urb is not running */
- int old_ready;
int processing;
int transmitting;
spinlock_t write_lock;
@@ -115,6 +113,7 @@ struct acm {
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
struct work_struct waker;
+ wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throttling and read callback */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e1..5a7fa6f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -230,6 +230,13 @@ static int usb_probe_interface(struct device *dev)
*/
intf->pm_usage_cnt = !(driver->supports_autosuspend);
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0) {
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ intf->needs_altsetting0 = 0;
+ }
+
error = driver->probe(intf, id);
if (error) {
mark_quiesced(intf);
@@ -266,8 +273,17 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
- /* reset other interface state */
- usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
+ /* Reset other interface state.
+ * We cannot do a Set-Interface if the device is suspended or
+ * if it is prepared for a system sleep (since installing a new
+ * altsetting means creating new endpoint device entries).
+ * When either of these happens, defer the Set-Interface.
+ */
+ if (!error && intf->dev.power.status == DPM_ON)
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ else
+ intf->needs_altsetting0 = 1;
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
@@ -774,7 +790,6 @@ void usb_deregister(struct usb_driver *driver)
}
EXPORT_SYMBOL_GPL(usb_deregister);
-
/* Forced unbinding of a USB interface driver, either because
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
@@ -799,7 +814,8 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
* The caller must hold @intf's device's lock, but not its pm_mutex
* and not @intf->dev.sem.
*
- * FIXME: The caller must block system sleep transitions.
+ * Note: Rebinds will be skipped if a system sleep transition is in
+ * progress and the PM "complete" callback hasn't occurred yet.
*/
void usb_rebind_intf(struct usb_interface *intf)
{
@@ -815,12 +831,16 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- intf->needs_binding = 0;
- rc = device_attach(&intf->dev);
- if (rc < 0)
- dev_warn(&intf->dev, "rebind failed: %d\n", rc);
+ if (intf->dev.power.status == DPM_ON) {
+ intf->needs_binding = 0;
+ rc = device_attach(&intf->dev);
+ if (rc < 0)
+ dev_warn(&intf->dev, "rebind failed: %d\n", rc);
+ }
}
+#ifdef CONFIG_PM
+
#define DO_UNBIND 0
#define DO_REBIND 1
@@ -828,7 +848,6 @@ void usb_rebind_intf(struct usb_interface *intf)
* or rebind interfaces that have been unbound, according to @action.
*
* The caller must hold @udev's device lock.
- * FIXME: For rebinds, the caller must block system sleep transitions.
*/
static void do_unbind_rebind(struct usb_device *udev, int action)
{
@@ -850,30 +869,14 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
}
break;
case DO_REBIND:
- if (intf->needs_binding) {
-
- /* FIXME: The next line is needed because we are going to probe
- * the interface, but as far as the PM core is concerned the
- * interface is still suspended. The problem wouldn't exist
- * if we could rebind the interface during the interface's own
- * resume() call, but at the time the usb_device isn't locked!
- *
- * The real solution will be to carry this out during the device's
- * complete() callback. Until that is implemented, we have to
- * use this hack.
- */
-// intf->dev.power.sleeping = 0;
-
+ if (intf->needs_binding)
usb_rebind_intf(intf);
- }
break;
}
}
}
}
-#ifdef CONFIG_PM
-
/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
@@ -927,14 +930,14 @@ static int usb_resume_device(struct usb_device *udev)
}
/* Caller has locked intf's usb_device's pm mutex */
-static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
+static int usb_suspend_interface(struct usb_device *udev,
+ struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
/* with no hardware, USB interfaces only use FREEZE and ON states */
- if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED ||
- !is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
goto done;
if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */
@@ -945,7 +948,7 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
status = driver->suspend(intf, msg);
if (status == 0)
mark_quiesced(intf);
- else if (!interface_to_usbdev(intf)->auto_pm)
+ else if (!udev->auto_pm)
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -962,13 +965,13 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
}
/* Caller has locked intf's usb_device's pm_mutex */
-static int usb_resume_interface(struct usb_interface *intf, int reset_resume)
+static int usb_resume_interface(struct usb_device *udev,
+ struct usb_interface *intf, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
- if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED ||
- is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED || is_active(intf))
goto done;
/* Don't let autoresume interfere with unbinding */
@@ -976,8 +979,17 @@ static int usb_resume_interface(struct usb_interface *intf, int reset_resume)
goto done;
/* Can't resume it if it doesn't have a driver. */
- if (intf->condition == USB_INTERFACE_UNBOUND)
+ if (intf->condition == USB_INTERFACE_UNBOUND) {
+
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0 &&
+ intf->dev.power.status == DPM_ON) {
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ intf->needs_altsetting0 = 0;
+ }
goto done;
+ }
/* Don't resume if the interface is marked for rebinding */
if (intf->needs_binding)
@@ -1152,7 +1164,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
if (udev->actconfig) {
for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- status = usb_suspend_interface(intf, msg);
+ status = usb_suspend_interface(udev, intf, msg);
if (status != 0)
break;
}
@@ -1164,7 +1176,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
if (status != 0) {
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf, 0);
+ usb_resume_interface(udev, intf, 0);
}
/* Try another autosuspend when the interfaces aren't busy */
@@ -1277,7 +1289,7 @@ static int usb_resume_both(struct usb_device *udev)
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf, udev->reset_resume);
+ usb_resume_interface(udev, intf, udev->reset_resume);
}
}
@@ -1606,12 +1618,10 @@ int usb_external_resume_device(struct usb_device *udev)
return status;
}
-static int usb_suspend(struct device *dev, pm_message_t message)
+int usb_suspend(struct device *dev, pm_message_t message)
{
struct usb_device *udev;
- if (!is_usb_device(dev)) /* Ignore PM for interfaces */
- return 0;
udev = to_usb_device(dev);
/* If udev is already suspended, we can skip this suspend and
@@ -1630,12 +1640,10 @@ static int usb_suspend(struct device *dev, pm_message_t message)
return usb_external_suspend_device(udev, message);
}
-static int usb_resume(struct device *dev)
+int usb_resume(struct device *dev)
{
struct usb_device *udev;
- if (!is_usb_device(dev)) /* Ignore PM for interfaces */
- return 0;
udev = to_usb_device(dev);
/* If udev->skip_sys_resume is set then udev was already suspended
@@ -1647,17 +1655,10 @@ static int usb_resume(struct device *dev)
return usb_external_resume_device(udev);
}
-#else
-
-#define usb_suspend NULL
-#define usb_resume NULL
-
#endif /* CONFIG_PM */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
- .suspend = usb_suspend,
- .resume = usb_resume,
};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f7bfd72..8abd4e5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -924,15 +924,6 @@ static int register_root_hub(struct usb_hcd *hcd)
return retval;
}
-void usb_enable_root_hub_irq (struct usb_bus *bus)
-{
- struct usb_hcd *hcd;
-
- hcd = container_of (bus, struct usb_hcd, self);
- if (hcd->driver->hub_irq_enable && hcd->state != HC_STATE_HALT)
- hcd->driver->hub_irq_enable (hcd);
-}
-
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 5b0b59b..e710ce0 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -212,8 +212,6 @@ struct hc_driver {
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
- void (*hub_irq_enable)(struct usb_hcd *);
- /* Needed only if port-change IRQs are level-triggered */
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);
@@ -379,8 +377,6 @@ extern struct list_head usb_bus_list;
extern struct mutex usb_bus_list_lock;
extern wait_queue_head_t usb_kill_urb_queue;
-extern void usb_enable_root_hub_irq(struct usb_bus *bus);
-
extern int usb_find_interface_driver(struct usb_device *dev,
struct usb_interface *interface);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 107e1d2..6a5cb01 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2102,8 +2102,6 @@ int usb_port_resume(struct usb_device *udev)
}
clear_bit(port1, hub->busy_bits);
- if (!hub->hdev->parent && !hub->busy_bits[0])
- usb_enable_root_hub_irq(hub->hdev->bus);
status = check_port_resume_type(udev,
hub, port1, status, portchange, portstatus);
@@ -3081,11 +3079,6 @@ static void hub_events(void)
}
}
- /* If this is a root hub, tell the HCD it's okay to
- * re-enable port-change interrupts now. */
- if (!hdev->parent && !hub->busy_bits[0])
- usb_enable_root_hub_irq(hdev->bus);
-
loop_autopm:
/* Allow autosuspend if we're not going to run again */
if (list_empty(&hub->event_list))
@@ -3311,8 +3304,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
break;
}
clear_bit(port1, parent_hub->busy_bits);
- if (!parent_hdev->parent && !parent_hub->busy_bits[0])
- usb_enable_root_hub_irq(parent_hdev->bus);
if (ret < 0)
goto re_enumerate;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 586d6f1..286b443 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- device_del(&interface->dev);
usb_remove_sysfs_intf_files(interface);
+ device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c0b1ae2..47111e8 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -601,15 +601,20 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ unsigned long flags;
- spin_lock_irq(&anchor->lock);
+ spin_lock_irqsave(&anchor->lock, flags);
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
+ usb_get_urb(victim);
+ spin_unlock_irqrestore(&anchor->lock, flags);
/* this will unanchor the URB */
usb_unlink_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irqsave(&anchor->lock, flags);
}
- spin_unlock_irq(&anchor->lock);
+ spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 84fcaa6..be1fa07 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -219,12 +219,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif /* CONFIG_HOTPLUG */
-struct device_type usb_device_type = {
- .name = "usb_device",
- .release = usb_release_dev,
- .uevent = usb_dev_uevent,
-};
-
#ifdef CONFIG_PM
static int ksuspend_usb_init(void)
@@ -244,13 +238,80 @@ static void ksuspend_usb_cleanup(void)
destroy_workqueue(ksuspend_usb_wq);
}
+/* USB device Power-Management thunks.
+ * There's no need to distinguish here between quiescing a USB device
+ * and powering it down; the generic_suspend() routine takes care of
+ * it by skipping the usb_port_suspend() call for a quiesce. And for
+ * USB interfaces there's no difference at all.
+ */
+
+static int usb_dev_prepare(struct device *dev)
+{
+ return 0; /* Implement eventually? */
+}
+
+static void usb_dev_complete(struct device *dev)
+{
+ /* Currently used only for rebinding interfaces */
+ usb_resume(dev); /* Implement eventually? */
+}
+
+static int usb_dev_suspend(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_SUSPEND);
+}
+
+static int usb_dev_resume(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static int usb_dev_freeze(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_FREEZE);
+}
+
+static int usb_dev_thaw(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static int usb_dev_poweroff(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_HIBERNATE);
+}
+
+static int usb_dev_restore(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static struct pm_ops usb_device_pm_ops = {
+ .prepare = usb_dev_prepare,
+ .complete = usb_dev_complete,
+ .suspend = usb_dev_suspend,
+ .resume = usb_dev_resume,
+ .freeze = usb_dev_freeze,
+ .thaw = usb_dev_thaw,
+ .poweroff = usb_dev_poweroff,
+ .restore = usb_dev_restore,
+};
+
#else
#define ksuspend_usb_init() 0
#define ksuspend_usb_cleanup() do {} while (0)
+#define usb_device_pm_ops (*(struct pm_ops *)0)
#endif /* CONFIG_PM */
+struct device_type usb_device_type = {
+ .name = "usb_device",
+ .release = usb_release_dev,
+ .uevent = usb_dev_uevent,
+ .pm = &usb_device_pm_ops,
+};
+
/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
static unsigned usb_bus_is_wusb(struct usb_bus *bus)
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index d9a6e16..9a1a45a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -41,6 +41,9 @@ extern void usb_host_cleanup(void);
#ifdef CONFIG_PM
+extern int usb_suspend(struct device *dev, pm_message_t msg);
+extern int usb_resume(struct device *dev);
+
extern void usb_autosuspend_work(struct work_struct *work);
extern int usb_port_suspend(struct usb_device *dev);
extern int usb_port_resume(struct usb_device *dev);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b..acc95b2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@ config USB_LH7A40X
default USB_GADGET
select USB_GADGET_SELECTED
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+ boolean "Inventra HDRC USB Peripheral (TI, ...)"
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SELECTED
+ help
+ This OTG-capable silicon IP is used in dual designs including
+ the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
config USB_GADGET_OMAP
boolean "OMAP USB Device Controller"
depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 1500e1b..abf8192 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -44,7 +44,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406..7600a0c 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
req->req.context = dum;
req->req.complete = fifo_complete;
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock (&dum->lock);
_req->actual = _req->length;
_req->status = 0;
_req->complete (_ep, _req);
spin_lock (&dum->lock);
- }
- list_add_tail (&req->queue, &ep->queue);
+ } else
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore (&dum->lock, flags);
/* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf..5ee1590 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@ struct f_acm {
u8 ctrl_id, data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
+ u8 pending;
+
+ /* lock is mostly for pending and notify_req ... they get accessed
+ * by callbacks both from tty (open/close/break) under its spinlock,
+ * and notify_req.complete() which can't use that lock.
+ */
+ spinlock_t lock;
+
struct acm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct acm_ep_descs hs;
struct usb_ep *notify;
struct usb_endpoint_descriptor *notify_desc;
+ struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+
+ /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
-#define RS232_RTS (1 << 1) /* unused with full duplex */
-#define RS232_DTR (1 << 0) /* host is ready for data r/w */
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+
+ /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f)
return container_of(f, struct f_acm, port.func);
}
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+ return container_of(p, struct f_acm, port);
+}
+
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
-#define GS_NOTIFY_MAXPACKET 8
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
/* interface and class descriptors: */
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
- .bmCapabilities = (1 << 1),
+ .bmCapabilities = USB_CDC_CAP_LINE,
};
static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
+ *
+ * Note CDC spec table 4 lists the ACM request profile. It requires
+ * encapsulated command support ... we don't handle any, and respond
+ * to them by stalling. Options include get/set/clear comm features
+ * (not that useful) and SEND_BREAK.
*/
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
value = 0;
/* FIXME we should not allow data to flow until the
- * host sets the RS232_DTR bit; and when it clears
+ * host sets the ACM_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
- /* REVISIT this may need more work when we start to
- * send notifications ...
- */
if (acm->notify->driver_data) {
VDBG(cdev, "reset acm control interface %d\n", intf);
usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm->notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue: SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = acm->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+
+ req = acm->notify_req;
+ acm->notify_req = NULL;
+ acm->pending = false;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(acm->ctrl_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(acm->port.func.config->cdev,
+ "acm ttyGS%d can't notify serial state, %d\n",
+ acm->port_num, status);
+ acm->notify_req = req;
+ }
+
+ return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+ struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+ int status;
+
+ spin_lock(&acm->lock);
+ if (acm->notify_req) {
+ DBG(cdev, "acm ttyGS%d serial state %04x\n",
+ acm->port_num, acm->serial_state);
+ status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &acm->serial_state, sizeof(acm->serial_state));
+ } else {
+ acm->pending = true;
+ status = 0;
+ }
+ spin_unlock(&acm->lock);
+ return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_acm *acm = req->context;
+ u8 doit = false;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+ spin_lock(&acm->lock);
+ if (req->status != -ESHUTDOWN)
+ doit = acm->pending;
+ acm->notify_req = req;
+ spin_unlock(&acm->lock);
+
+ if (doit)
+ acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+ struct f_acm *acm = port_to_acm(port);
+ u16 state;
+
+ state = acm->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ acm->serial_state = state;
+ return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
/* ACM function driver setup/binding */
static int __init
acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->notify = ep;
ep->driver_data = cdev; /* claim */
+ /* allocate notification */
+ acm->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!acm->notify_req)
+ goto fail;
+
+ acm->notify_req->complete = acm_cdc_notify_complete;
+ acm->notify_req->context = acm;
+
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(acm_fs_function);
+ if (!f->descriptors)
+ goto fail;
acm->fs.in = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors, &acm_hs_notify_desc);
}
- /* FIXME provide a callback for triggering notifications */
-
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ if (acm->notify_req)
+ gs_free_req(acm->notify, acm->notify_req);
+
/* we might as well release our claims on endpoints */
if (acm->notify)
acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@ fail:
static void
acm_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_acm *acm = func_to_acm(f);
+
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
- kfree(func_to_acm(f));
+ gs_free_req(acm->notify, acm->notify_req);
+ kfree(acm);
}
/* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
if (!acm)
return -ENOMEM;
+ spin_lock_init(&acm->lock);
+
acm->port_num = port_num;
+ acm->port.connect = acm_connect;
+ acm->port.disconnect = acm_disconnect;
+ acm->port.send_break = acm_send_break;
+
acm->port.func.name = "acm";
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
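The SerialState plumbing added above is reusable for any of the CDC modem-status bits, not just DSR/DCD and break. A minimal sketch (not part of this patch, and assuming an ACM_CTRL_RI bit is defined alongside the ACM_CTRL_DSR/DCD/BRK bits used here) of reporting ring-indicate the same way:

static int acm_ring_indicate(struct f_acm *acm, bool ringing)
{
	/* update the cached modem-status bits ... */
	if (ringing)
		acm->serial_state |= ACM_CTRL_RI;
	else
		acm->serial_state &= ~ACM_CTRL_RI;

	/* ... then queue a SERIAL_STATE notification, or leave it marked
	 * pending if one is already in flight (see acm_notify_serial_state)
	 */
	return acm_notify_serial_state(acm);
}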
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d..a2b5c09 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@ struct f_ecm {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct ecm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct ecm_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0..659b3d9 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@ struct f_rndis {
u8 ethaddr[ETH_ALEN];
int config;
- struct usb_descriptor_header **fs_function;
struct rndis_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct rndis_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9..fe5674db 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@ struct f_gser {
u8 data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
struct gser_descs fs;
- struct usb_descriptor_header **hs_function;
struct gser_descs hs;
};
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a..acb8d23 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@ struct f_gether {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct geth_descs fs;
- struct usb_descriptor_header **hs_function;
struct geth_descs hs;
};
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8f..17d9905 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
* Some are available on 2.4 kernels; several are available, but not
* yet pushed in the 2.6 mainline tree.
*/
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
#ifdef CONFIG_USB_GADGET_NET2280
#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
#else
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
/* Everything else is *presumably* fine ... */
return true;
}
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 376e80c..574c538 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -54,6 +54,7 @@
#include <mach/dma.h>
#include <mach/usb.h>
+#include <mach/control.h>
#include "omap_udc.h"
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s)
u32 trans;
char *ctrl_name;
- tmp = OTG_REV_REG;
+ tmp = omap_readl(OTG_REV);
if (cpu_is_omap24xx()) {
ctrl_name = "control_devconf";
- trans = CONTROL_DEVCONF_REG;
+ trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
} else {
ctrl_name = "tranceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index a28513e..7cbc78a 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1622,7 +1622,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
struct pxa_udc *udc = the_controller;
int retval;
- if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind
+ if (!driver || driver->speed < USB_SPEED_FULL || !driver->bind
|| !driver->disconnect || !driver->setup)
return -EINVAL;
if (!udc)
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 5388073..29d13eb 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -35,7 +35,6 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505..53d5928 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
* is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
*/
+#define PREFIX "ttyGS"
+
/*
* gserial is the lifecycle interface, used by USB functions
* gs_port is the I/O nexus, used by the tty driver
* tty_struct links to the tty/filesystem framework
*
* gserial <---> gs_port ... links will be null when the USB link is
- * inactive; managed by gserial_{connect,disconnect}().
+ * inactive; managed by gserial_{connect,disconnect}(). each gserial
+ * instance can wrap its own USB control protocol.
* gserial->ioport == usb_ep->driver_data ... gs_port
* gs_port->port_usb ... gserial
*
@@ -100,6 +103,8 @@ struct gs_port {
wait_queue_head_t close_wait; /* wait for last close */
struct list_head read_pool;
+ struct list_head read_queue;
+ unsigned n_read;
struct tasklet_struct push;
struct list_head write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
/*
* gs_buf_data_avail
*
- * Return the number of bytes of data available in the circular
+ * Return the number of bytes of data written into the circular
* buffer.
*/
static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
* Allocate a usb_request and its buffer. Returns a pointer to the
* usb_request or NULL if there is an error.
*/
-static struct usb_request *
+struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
*
* Free a usb_request and its buffer.
*/
-static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
-#ifdef VERBOSE_DEBUG
- pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
- __func__, in->name, len, *((u8 *)req->buf),
+ pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+ port->port_num, len, *((u8 *)req->buf),
*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
return status;
}
-static void gs_rx_push(unsigned long _port)
-{
- struct gs_port *port = (void *)_port;
- struct tty_struct *tty = port->port_tty;
-
- /* With low_latency, tty_flip_buffer_push() doesn't put its
- * real work through a workqueue, so the ldisc has a better
- * chance to keep up with peak USB data rates.
- */
- if (tty) {
- tty_flip_buffer_push(tty);
- wake_up_interruptible(&tty->read_wait);
- }
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received. Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
- unsigned len;
- struct tty_struct *tty;
-
- /* I/O completions can continue for a while after close(), until the
- * request queue empties. Just discard any data we receive, until
- * something reopens this TTY ... as if there were no HW flow control.
- */
- tty = port->port_tty;
- if (tty == NULL) {
- pr_vdebug("%s: ttyGS%d, after close\n",
- __func__, port->port_num);
- return -EIO;
- }
-
- len = tty_insert_flip_string(tty, packet, size);
- if (len > 0)
- tasklet_schedule(&port->push);
- if (len < size)
- pr_debug("%s: ttyGS%d, drop %d bytes\n",
- __func__, port->port_num, size - len);
- return 0;
-}
-
/*
* Context: caller owns port_lock, and port_usb is set
*/
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
int status;
struct tty_struct *tty;
- /* no more rx if closed or throttled */
+ /* no more rx if closed */
tty = port->port_tty;
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+ if (!tty)
break;
req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
return started;
}
-static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can sit buffered upstream of the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
{
- int status;
- struct gs_port *port = ep->driver_data;
+ struct gs_port *port = (void *)_port;
+ struct tty_struct *tty;
+ struct list_head *queue = &port->read_queue;
+ bool disconnect = false;
+ bool do_push = false;
- spin_lock(&port->port_lock);
- list_add(&req->list, &port->read_pool);
+ /* hand any queued data to the tty */
+ spin_lock_irq(&port->port_lock);
+ tty = port->port_tty;
+ while (!list_empty(queue)) {
+ struct usb_request *req;
- switch (req->status) {
- case 0:
- /* normal completion */
- status = gs_recv_packet(port, req->buf, req->actual);
- if (status && status != -EIO)
- pr_debug("%s: %s %s err %d\n",
- __func__, "recv", ep->name, status);
- gs_start_rx(port);
- break;
+ req = list_first_entry(queue, struct usb_request, list);
- case -ESHUTDOWN:
- /* disconnect */
- pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
- break;
+ /* discard data if tty was closed */
+ if (!tty)
+ goto recycle;
- default:
- /* presumably a transient fault */
- pr_warning("%s: unexpected %s status %d\n",
- __func__, ep->name, req->status);
- gs_start_rx(port);
- break;
+ /* leave data queued if tty was rx throttled */
+ if (test_bit(TTY_THROTTLED, &tty->flags))
+ break;
+
+ switch (req->status) {
+ case -ESHUTDOWN:
+ disconnect = true;
+ pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+ break;
+
+ default:
+ /* presumably a transient fault */
+ pr_warning(PREFIX "%d: unexpected RX status %d\n",
+ port->port_num, req->status);
+ /* FALLTHROUGH */
+ case 0:
+ /* normal completion */
+ break;
+ }
+
+ /* push data to (open) tty */
+ if (req->actual) {
+ char *packet = req->buf;
+ unsigned size = req->actual;
+ unsigned n;
+ int count;
+
+ /* we may have pushed part of this packet already... */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(tty, packet, size);
+ if (count)
+ do_push = true;
+ if (count != size) {
+ /* stop pushing; TTY layer can't handle more */
+ port->n_read += count;
+ pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+ port->port_num,
+ count, req->actual);
+ break;
+ }
+ port->n_read = 0;
+ }
+recycle:
+ list_move(&req->list, &port->read_pool);
}
+
+ /* Push from tty to ldisc; this is immediate with low_latency, and
+ * may trigger callbacks to this driver ... so drop the spinlock.
+ */
+ if (tty && do_push) {
+ spin_unlock_irq(&port->port_lock);
+ tty_flip_buffer_push(tty);
+ wake_up_interruptible(&tty->read_wait);
+ spin_lock_irq(&port->port_lock);
+
+ /* tty may have been closed */
+ tty = port->port_tty;
+ }
+
+
+ /* We want our data queue to become empty ASAP, keeping data
+ * in the tty and ldisc (not here). If we couldn't push any
+ * this time around, there may be trouble unless there's an
+ * implicit tty_unthrottle() call on its way...
+ *
+ * REVISIT we should probably add a timer to keep the tasklet
+ * from starving ... but it's not clear that case ever happens.
+ */
+ if (!list_empty(queue) && tty) {
+ if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (do_push)
+ tasklet_schedule(&port->push);
+ else
+ pr_warning(PREFIX "%d: RX not scheduled?\n",
+ port->port_num);
+ }
+ }
+
+ /* If we're still connected, refill the USB RX queue. */
+ if (!disconnect && port->port_usb)
+ gs_start_rx(port);
+
+ spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gs_port *port = ep->driver_data;
+
+ /* Queue all received data until the tty layer is ready for it. */
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list, &port->read_queue);
+ tasklet_schedule(&port->push);
spin_unlock(&port->port_lock);
}
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
}
/* queue read requests */
+ port->n_read = 0;
started = gs_start_rx(port);
/* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
} else {
gs_free_requests(ep, head);
gs_free_requests(port->port_usb->in, &port->write_pool);
+ status = -EIO;
}
- return started ? 0 : status;
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
/* if connected, start the I/O stream */
if (port->port_usb) {
+ struct gserial *gser = port->port_usb;
+
pr_debug("gs_open: start ttyGS%d\n", port->port_num);
gs_start_io(port);
- /* REVISIT for ACM, issue "network connected" event */
+ if (gser->connect)
+ gser->connect(gser);
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
static void gs_close(struct tty_struct *tty, struct file *file)
{
struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
spin_lock_irq(&port->port_lock);
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
port->openclose = true;
port->open_count = 0;
- if (port->port_usb)
- /* REVISIT for ACM, issue "network disconnected" event */;
+ gser = port->port_usb;
+ if (gser && gser->disconnect)
+ gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
* most GS_CLOSE_TIMEOUT seconds; then discard the rest
*/
- if (gs_buf_data_avail(&port->port_write_buf) > 0
- && port->port_usb) {
+ if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
gs_writes_finished(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
}
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
* let the push tasklet fire again until we're re-opened.
*/
- if (port->port_usb == NULL)
+ if (gser == NULL)
gs_buf_free(&port->port_write_buf);
else
gs_buf_clear(&port->port_write_buf);
- tasklet_kill(&port->push);
-
tty->driver_data = NULL;
port->port_tty = NULL;
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- unsigned started = 0;
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port_usb)
- started = gs_start_rx(port);
+ if (port->port_usb) {
+ /* Kickstart read queue processing. We don't do xon/xoff,
+ * rts/cts, or other handshaking with the host, but if the
+ * read queue backs up enough we'll be NAKing OUT packets.
+ */
+ tasklet_schedule(&port->push);
+ pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+ }
spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+ struct gs_port *port = tty->driver_data;
+ int status = 0;
+ struct gserial *gser;
+
+ pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+ port->port_num, duration);
- pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
- port->port_num, started);
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (gser && gser->send_break)
+ status = gser->send_break(gser, duration);
+ spin_unlock_irq(&port->port_lock);
+
+ return status;
}
static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
.write_room = gs_write_room,
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
+ .break_ctl = gs_break_ctl,
};
/*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
gs_tty_driver->owner = THIS_MODULE;
gs_tty_driver->driver_name = "g_serial";
- gs_tty_driver->name = "ttyGS";
+ gs_tty_driver->name = PREFIX;
/* uses dynamically assigned dev_t values */
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
ports[i].port = NULL;
mutex_unlock(&ports[i].lock);
+ tasklet_kill(&port->push);
+
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
/* REVISIT if waiting on "carrier detect", signal. */
- /* REVISIT for ACM, issue "network connection" status notification:
- * connected if open_count, else disconnected.
+ /* if it's already open, start I/O ... and notify the serial
+ * protocol about open/close status (connect/disconnect).
*/
-
- /* if it's already open, start I/O */
if (port->open_count) {
pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
gs_start_io(port);
+ if (gser->connect)
+ gser->connect(gser);
+ } else {
+ if (gser->disconnect)
+ gser->disconnect(gser);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
if (port->open_count == 0 && !port->openclose)
gs_buf_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool);
+ gs_free_requests(gser->out, &port->read_queue);
gs_free_requests(gser->in, &port->write_pool);
spin_unlock_irqrestore(&port->port_lock, flags);
}
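The gs_rx_push() rework earlier in this file keeps a per-port n_read counter so a request that the TTY layer only partially accepted can be resumed later instead of dropping bytes. Condensing that bookkeeping into a hypothetical helper (gs_push_one() is not in the patch; the field and list names are the ones used above):

static void gs_push_one(struct gs_port *port, struct tty_struct *tty,
			struct usb_request *req)
{
	int count;

	count = tty_insert_flip_string(tty, req->buf + port->n_read,
					req->actual - port->n_read);
	if (count < req->actual - port->n_read) {
		/* TTY is full: remember how far we got; the request stays
		 * on read_queue for the next tasklet run
		 */
		port->n_read += count;
	} else {
		/* fully consumed: reset and recycle for gs_start_rx() */
		port->n_read = 0;
		list_move(&req->list, &port->read_pool);
	}
}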
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b56113..af3910d 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
* style I/O using the USB peripheral endpoints listed here, including
* hookups to sysfs and /dev for each logical "tty" device.
*
- * REVISIT need TTY --> USB event flow too, so ACM can report open/close
- * as carrier detect events. Model after ECM. There's more ACM state too.
+ * REVISIT at least ACM could support tiocmget() if needed.
*
* REVISIT someday, allow multiplexing several TTYs over these endpoints.
*/
@@ -41,8 +40,17 @@ struct gserial {
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+
+ /* notification callbacks */
+ void (*connect)(struct gserial *p);
+ void (*disconnect)(struct gserial *p);
+ int (*send_break)(struct gserial *p, int duration);
};
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
/* port setup/teardown is handled by gadget driver */
int gserial_setup(struct usb_gadget *g, unsigned n_ports);
void gserial_cleanup(void);
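These three callbacks give u_serial a TTY-to-function event path: gs_open(), gs_close() and gs_break_ctl() invoke them when set, and any of them may be left NULL. A sketch of how a function driver wires them up (the handler names here are placeholders, not from this patch):

static void my_connect(struct gserial *port)
{
	/* e.g. raise DSR/DCD toward the host, as f_acm does */
}

static void my_disconnect(struct gserial *port)
{
	/* e.g. drop DSR/DCD */
}

static int my_send_break(struct gserial *port, int duration)
{
	/* report break state to the host; negative errno on failure */
	return 0;
}

static void my_wire_up(struct gserial *port)
{
	port->connect = my_connect;
	port->disconnect = my_disconnect;
	port->send_break = my_send_break;
}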
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 5fbdc14..5416cf9 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
-#include <asm/plat-orion/ehci-orion.h>
+#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2a..8017f1c 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
* doesn't quite work because some people have to enforce 32-bit access
*/
static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 offset, u32 len)
+ __u32 __iomem *dst, u32 len)
{
- struct usb_hcd *hcd = priv_to_hcd(priv);
u32 val;
u8 *buff8;
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
return;
}
- isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
- /* XXX
- * 90nsec delay, the spec says something how this could be avoided.
- */
- mdelay(1);
while (len >= 4) {
*src = __raw_readl(dst);
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
printk(KERN_ERR "qh is 0\n");
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs,
- atl_regs, sizeof(ptd));
+ isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload, payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
/* short BULK received */
- printk(KERN_ERR "short bulk, %d instead %zu\n", length,
- qtd->length);
if (urb->transfer_flags & URB_SHORT_NOT_OK) {
urb->status = -EREMOTEIO;
- printk(KERN_ERR "not okey\n");
+ isp1760_dbg(priv, "short bulk, %d instead %zu "
+ "with URB_SHORT_NOT_OK flag.\n",
+ length, qtd->length);
}
if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs,
- int_regs, sizeof(ptd));
+ isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw3 = le32_to_cpu(ptd.dw3);
check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload , payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -EPIPE;
}
- isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
- return 0;
+ return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd8..4377277 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void);
#define BUFFER_MAP 0x7
#define HC_MEMORY_REG 0x33c
+#define ISP_BANK(x) ((x) << 16)
+
#define HC_PORT1_CTRL 0x374
#define PORT1_POWER (3 << 3)
#define PORT1_INIT1 (1 << 7)
@@ -119,6 +121,9 @@ struct inter_packet_info {
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
+#define isp1760_dbg(priv, fmt, args...) \
+ dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
+
#define isp1760_info(priv, fmt, args...) \
dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
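ISP_BANK(x) just places the bank number in bits 16 and up of the value written to HC_MEMORY_REG, so bank 0 and bank 1 expose two independent windows into chip memory. The access pattern used by the do_atl_int()/do_intl_int() hunks above, condensed (ptd_offset stands in for atl_regs/int_regs):

	/* map the PTD through bank 0 and the payload through bank 1 */
	isp1760_writel(ptd_offset + ISP_BANK(0), usb_hcd->regs + HC_MEMORY_REG);
	isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);
	/* the repeated write only exists to guarantee the ~90ns settling
	 * time before the freshly selected window is read
	 */
	isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);

	priv_read_copy(priv, (u32 *)&ptd,
			usb_hcd->regs + ptd_offset + ISP_BANK(0), sizeof(ptd));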
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 6db7a28..4ed228a 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -260,7 +260,6 @@ static const struct hc_driver ohci_at91_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index c094800..2ac4e02 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -163,7 +163,6 @@ static const struct hc_driver ohci_au1xxx_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index cb0b506..fb3055f 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -134,7 +134,6 @@ static struct hc_driver ohci_ep93xx_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc479..8990196 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd);
static int ohci_restart (struct ohci_hcd *ohci);
#endif
+#ifdef CONFIG_PCI
+static void quirk_amd_pll(int state);
+static void amd_iso_dev_put(void);
+#else
+static inline void quirk_amd_pll(int state)
+{
+ return;
+}
+static inline void amd_iso_dev_put(void)
+{
+ return;
+}
+#endif
+
+
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci)
int ret;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
+ if (distrust_firmware)
+ ohci->flags |= OHCI_QUIRK_HUB_POWER;
+
disable (ohci);
ohci->regs = hcd->regs;
@@ -689,7 +707,8 @@ retry:
temp |= RH_A_NOCP;
temp &= ~(RH_A_POTPGT | RH_A_NPS);
ohci_writel (ohci, temp, &ohci->regs->roothub.a);
- } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
+ } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+ (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
+ if (quirk_amdiso(ohci))
+ amd_iso_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b567392..7ea9a7b 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -36,18 +36,6 @@
/*-------------------------------------------------------------------------*/
-/* hcd->hub_irq_enable() */
-static void ohci_rhsc_enable (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-
- spin_lock_irq(&ohci->lock);
- if (!ohci->autostop)
- del_timer(&hcd->rh_timer); /* Prevent next poll */
- ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
- spin_unlock_irq(&ohci->lock);
-}
-
#define OHCI_SCHED_ENABLES \
(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
@@ -374,18 +362,28 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
int poll_rh = 1;
+ int rhsc;
+ rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC;
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
- /* keep on polling until we know a device is connected
- * and RHSC is enabled */
+ /* If no status changes are pending, enable status-change
+ * interrupts.
+ */
+ if (!rhsc && !changed) {
+ rhsc = OHCI_INTR_RHSC;
+ ohci_writel(ohci, rhsc, &ohci->regs->intrenable);
+ }
+
+ /* Keep on polling until we know a device is connected
+ * and RHSC is enabled, or until we autostop.
+ */
if (!ohci->autostop) {
if (any_connected ||
!device_may_wakeup(&ohci_to_hcd(ohci)
->self.root_hub->dev)) {
- if (ohci_readl(ohci, &ohci->regs->intrenable) &
- OHCI_INTR_RHSC)
+ if (rhsc)
poll_rh = 0;
} else {
ohci->autostop = 1;
@@ -398,12 +396,13 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
ohci->autostop = 0;
ohci->next_statechange = jiffies +
STATECHANGE_DELAY;
- } else if (time_after_eq(jiffies,
+ } else if (rhsc && time_after_eq(jiffies,
ohci->next_statechange)
&& !ohci->ed_rm_list
&& !(ohci->hc_control &
OHCI_SCHED_ENABLES)) {
ohci_rh_suspend(ohci, 1);
+ poll_rh = 0;
}
}
break;
@@ -417,6 +416,12 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
else
usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
} else {
+ if (!rhsc && (ohci->autostop ||
+ ohci_to_hcd(ohci)->self.root_hub->
+ do_remote_wakeup))
+ ohci_writel(ohci, OHCI_INTR_RHSC,
+ &ohci->regs->intrenable);
+
/* everything is idle, no need for polling */
poll_rh = 0;
}
@@ -438,12 +443,16 @@ static inline int ohci_rh_resume(struct ohci_hcd *ohci)
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
- int poll_rh = 1;
-
- /* keep on polling until RHSC is enabled */
+ /* If RHSC is enabled, don't poll */
if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
- poll_rh = 0;
- return poll_rh;
+ return 0;
+
+ /* If no status changes are pending, enable status-change interrupts */
+ if (!changed) {
+ ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+ return 0;
+ }
+ return 1;
}
#endif /* CONFIG_PM */
@@ -483,6 +492,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
length++;
}
+ /* Some broken controllers never turn off RHSC in the interrupt
+ * status register. For their sake we won't re-enable RHSC
+ * interrupts if the flag is already set.
+ */
+ if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
+ changed = 1;
+
/* look at each port */
for (i = 0; i < ohci->num_ports; i++) {
u32 status = roothub_portstatus (ohci, i);
@@ -572,8 +588,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
return 0;
}
-static void start_hnp(struct ohci_hcd *ohci);
-
#else
#define ohci_start_port_reset NULL
@@ -760,7 +774,7 @@ static int ohci_hub_control (
#ifdef CONFIG_USB_OTG
if (hcd->self.otg_port == (wIndex + 1)
&& hcd->self.b_hnp_enable)
- start_hnp(ohci);
+ ohci->start_hnp(ohci);
else
#endif
ohci_writel (ohci, RH_PS_PSS,
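With these changes the non-PM ohci_root_hub_state_changes() boils down to three cases; restated from the hunk above with comments:

	if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
		return 0;	/* RHSC already armed: interrupts report changes */

	if (!changed) {
		/* nothing pending: arm RHSC and let the interrupt take over */
		ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
		return 0;
	}

	return 1;		/* a change is pending: keep polling until delivered */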
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 9e31d44..de42283 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -193,7 +193,6 @@ static const struct hc_driver ohci_lh7a404_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 94dfca0..1eb64d0 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
dev_err(hcd->self.controller, "can't find transceiver\n");
return -ENODEV;
}
+ ohci->start_hnp = start_hnp;
}
#endif
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
omap_cfg_reg(W4_USB_HIGHZ);
}
ohci_writel(ohci, rh, &ohci->regs->roothub.a);
- distrust_firmware = 0;
+ ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
} else if (machine_is_nokia770()) {
/* We require a self-powered hub, which should have
* plenty of power. */
@@ -469,7 +470,6 @@ static const struct hc_driver ohci_omap_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc9..a9c2ae3 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
#error "This file is PCI bus glue. CONFIG_PCI must be defined."
#endif
+#include <linux/pci.h>
+#include <linux/io.h>
+
+
+/* constants used to work around PM-related transfer
+ * glitches in some AMD 700 series southbridges
+ */
+#define AB_REG_BAR 0xf0
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define AX_INDXC 0x30
+#define AX_DATAC 0x34
+
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define PCIE_P_CNTL 0x10040
+#define BIF_NB 0x10002
+
+static struct pci_dev *amd_smbus_dev;
+static struct pci_dev *amd_hb_dev;
+static int amd_ohci_iso_count;
+
/*-------------------------------------------------------------------------*/
static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_amd700(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ u8 rev = 0;
+
+ if (!amd_smbus_dev)
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (!amd_smbus_dev)
+ return 0;
+
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if ((rev > 0x3b) || (rev < 0x30)) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+
+ amd_ohci_iso_count++;
+
+ if (!amd_hb_dev)
+ amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
+
+ ohci->flags |= OHCI_QUIRK_AMD_ISO;
+ ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
+
+ return 0;
+}
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * Assume the system is configured to have USB 1.1 ISO transfers going
+ * to or from a USB device. Without this quirk, that stream may stutter
+ * or have breaks occasionally. For transfers going to speakers, this
+ * makes a very audible mess...
+ *
+ * That audio playback corruption is due to the audio stream getting
+ * interrupted occasionally when the link goes into a lower power state.
+ * This USB quirk prevents the link from entering that lower power state
+ * during audio playback or other ISO operations.
+ */
+static void quirk_amd_pll(int on)
+{
+ u32 addr;
+ u32 val;
+ u32 bit = (on > 0) ? 1 : 0;
+
+ pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
+
+ /* BIT names/meanings are NDA-protected, sorry ... */
+
+ outl(AX_INDXC, AB_INDX(addr));
+ outl(0x40, AB_DATA(addr));
+ outl(AX_DATAC, AB_INDX(addr));
+ val = inl(AB_DATA(addr));
+ val &= ~((1 << 3) | (1 << 4) | (1 << 9));
+ val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
+ outl(val, AB_DATA(addr));
+
+ if (amd_hb_dev) {
+ addr = PCIE_P_CNTL;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+ val |= bit | (bit << 3) | (bit << 12);
+ val |= ((!bit) << 4) | ((!bit) << 9);
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+
+ addr = BIF_NB;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 << 8);
+ val |= bit << 8;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+ }
+}
+
+static void amd_iso_dev_put(void)
+{
+ amd_ohci_iso_count--;
+ if (amd_ohci_iso_count == 0) {
+ if (amd_smbus_dev) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ }
+ if (amd_hb_dev) {
+ pci_dev_put(amd_hb_dev);
+ amd_hb_dev = NULL;
+ }
+ }
+
+}
+
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
.driver_data = (unsigned long) broken_suspend,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
*/
@@ -327,7 +459,6 @@ static const struct hc_driver ohci_pci_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
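quirk_amd_pll() reaches the link-power control register through a double-indexed sequence: AB_INDX/AB_DATA first select the AX_INDXC index port, then the AX_DATAC data port. Pulled out as a hypothetical helper (not in the patch) to make the read-modify-write explicit:

static void ax_update(u32 ab_base, u32 reg, u32 clear, u32 set)
{
	u32 val;

	outl(AX_INDXC, AB_INDX(ab_base));	/* select the index port */
	outl(reg, AB_DATA(ab_base));		/* point it at 'reg' (0x40 above) */
	outl(AX_DATAC, AB_INDX(ab_base));	/* select the data port */
	val = inl(AB_DATA(ab_base));
	val &= ~clear;
	val |= set;
	outl(val, AB_DATA(ab_base));
}

The northbridge side applies the same idea through PCI config space, with NB_PCIE_INDX_ADDR holding the index and NB_PCIE_INDX_DATA carrying the value.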
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index b02cd07..658a2a9 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -277,7 +277,6 @@ static const struct hc_driver ohci_pnx4008_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
index 605d59c..28467e2 100644
--- a/drivers/usb/host/ohci-pnx8550.c
+++ b/drivers/usb/host/ohci-pnx8550.c
@@ -201,7 +201,6 @@ static const struct hc_driver ohci_pnx8550_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 91e6e10..7ac5326 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -72,7 +72,6 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index 523c301..cd3398b 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -172,7 +172,6 @@ static const struct hc_driver ohci_ppc_soc_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 55c9564..2089d8a 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -68,7 +68,6 @@ static const struct hc_driver ps3_ohci_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
.start_port_reset = ohci_start_port_reset,
#if defined(CONFIG_PM)
.bus_suspend = ohci_bus_suspend,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 8c9c484..7f0f35c 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -298,7 +298,6 @@ static const struct hc_driver ohci_pxa27x_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c5..c2d80f8 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@ __acquires(ohci->lock)
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(1);
break;
case PIPE_INTERRUPT:
ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@ static void td_submit_urb (
data + urb->iso_frame_desc [cnt].offset,
urb->iso_frame_desc [cnt].length, urb, cnt);
}
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(0);
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
break;
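Taken with the ohci-pci.c quirk above, these two ohci-q.c hunks implement a simple policy: A-link power saving is disabled while any isochronous URBs are in flight. Note the asymmetry of the checks: on the submit path bandwidth_isoc_reqs has not yet been incremented, so == 0 means "about to queue the first ISO URB"; on the completion path it has already been decremented, so == 0 means "that was the last one". In outline:

	/* td_submit_urb(): about to queue the first ISO URB */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 && quirk_amdiso(ohci))
		quirk_amd_pll(0);	/* keep the A-link out of low-power states */

	/* URB completion path (finish_urb): the last ISO URB just finished */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 && quirk_amdiso(ohci))
		quirk_amd_pll(1);	/* allow power management again */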
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 9e3dc40..f46af7a 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -466,7 +466,6 @@ static const struct hc_driver ohci_s3c2410_hc_driver = {
*/
.hub_status_data = ohci_s3c2410_hub_status_data,
.hub_control = ohci_s3c2410_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 4626b00..e4bbe8e 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -231,7 +231,6 @@ static const struct hc_driver ohci_sa1111_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index e7ee607..60f03cc 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -68,7 +68,6 @@ static const struct hc_driver ohci_sh_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 21b164e..cff2363 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -75,7 +75,6 @@ static const struct hc_driver ohci_sm501_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index 3660c83..23fd6a8 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -81,7 +81,6 @@ static const struct hc_driver ssb_ohci_hc_driver = {
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544dd..faf622e 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@ struct ohci_hcd {
* other external transceivers should be software-transparent
*/
struct otg_transceiver *transceiver;
+ void (*start_hnp)(struct ohci_hcd *ohci);
/*
* memory management for queue data structures
@@ -399,6 +400,8 @@ struct ohci_hcd {
#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
+#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
+#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers need AMD PLL quirk */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return ohci->flags & OHCI_QUIRK_ZFMICRO;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return ohci->flags & OHCI_QUIRK_AMD_ISO;
+}
#else
static inline int quirk_nec(struct ohci_hcd *ohci)
{
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return 0;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return 0;
+}
#endif
/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dd..ea7126f 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
disable_irq_nrdy(r8a66597, pipenum);
}
+static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
+{
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
+}
+
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
+ int connect)
+{
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+ rh->scount = R8A66597_MAX_SAMPLING;
+ if (connect)
+ rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+ else
+ rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+
+ r8a66597_root_hub_start_polling(r8a66597);
+}
+
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
{
if (syssts == SE0) {
+ r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
return;
}
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
{
struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
- r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
-
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev);
- r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+ start_root_hub_sampling(r8a66597, port, 0);
}
/* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
}
}
-static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
-{
- mod_timer(&r8a66597->rh_timer,
- jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
-}
-
-static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
-{
- struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
-
- rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
- rh->scount = R8A66597_MAX_SAMPLING;
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
- r8a66597_root_hub_start_polling(r8a66597);
-}
-
static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 1);
+ start_root_hub_sampling(r8a66597, 1, 1);
}
if (mask2 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 0);
+ start_root_hub_sampling(r8a66597, 0, 1);
}
if (mask1 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS1);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 20ad3c4..228f2b0 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2934,16 +2934,6 @@ static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
return 0;
}
-static void u132_hub_irq_enable(struct usb_hcd *hcd)
-{
- struct u132 *u132 = hcd_to_u132(hcd);
- if (u132->going > 1) {
- dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
- , u132->going);
- } else if (u132->going > 0)
- dev_err(&u132->platform_dev->dev, "device is being removed\n");
-}
-
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
@@ -2995,7 +2985,6 @@ static struct hc_driver u132_hc_driver = {
.bus_suspend = u132_bus_suspend,
.bus_resume = u132_bus_resume,
.start_port_reset = u132_start_port_reset,
- .hub_irq_enable = u132_hub_irq_enable,
};
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c..4ea50e0 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@ config USB_ADUTUX
To compile this driver as a module, choose M here. The module
will be called adutux.
-config USB_AUERSWALD
- tristate "USB Auerswald ISDN support"
- depends on USB
- help
- Say Y here if you want to connect an Auerswald USB ISDN Device
- to your computer's USB port.
-
- To compile this driver as a module, choose M here: the
- module will be called auerswald.
-
config USB_RIO500
tristate "USB Diamond Rio500 support"
depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091c..45b4e12 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5..0000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*****************************************************************************/
-/*
- * auerswald.c -- Auerswald PBX/System Telephone usb driver.
- *
- * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de)
- *
- * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
- * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
- /*****************************************************************************/
-
-/* Standard Linux module include files */
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-
-/*-------------------------------------------------------------------*/
-/* Debug support */
-#ifdef DEBUG
-#define dump( adr, len) \
-do { \
- unsigned int u; \
- printk (KERN_DEBUG); \
- for (u = 0; u < len; u++) \
- printk (" %02X", adr[u] & 0xFF); \
- printk ("\n"); \
-} while (0)
-#else
-#define dump( adr, len)
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Version Information */
-#define DRIVER_VERSION "0.9.11"
-#define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>"
-#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
-
-/*-------------------------------------------------------------------*/
-/* Private declarations for Auerswald USB driver */
-
-/* Auerswald Vendor ID */
-#define ID_AUERSWALD 0x09BF
-
-#define AUER_MINOR_BASE 112 /* auerswald driver minor number */
-
-/* we can have up to this number of device plugged in at once */
-#define AUER_MAX_DEVICES 16
-
-
-/* Number of read buffers for each device */
-#define AU_RBUFFERS 10
-
-/* Number of chain elements for each control chain */
-#define AUCH_ELEMENTS 20
-
-/* Number of retries in communication */
-#define AU_RETRIES 10
-
-/*-------------------------------------------------------------------*/
-/* vendor specific protocol */
-/* Header Byte */
-#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
-#define AUH_DIRECT 0x00 /* data is for USB device */
-#define AUH_INDIRECT 0x80 /* USB device is relay */
-
-#define AUH_SPLITMASK 0x40 /* mask for split bit */
-#define AUH_UNSPLIT 0x00 /* data block is full-size */
-#define AUH_SPLIT 0x40 /* data block is part of a larger one,
- split-byte follows */
-
-#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
-#define AUH_TYPESIZE 0x40 /* different types */
-#define AUH_DCHANNEL 0x00 /* D channel data */
-#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
-#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
-/* 0x03..0x0F reserved for driver internal use */
-#define AUH_COMMAND 0x10 /* Command channel */
-#define AUH_BPROT 0x11 /* Configuration block protocol */
-#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
-#define AUH_TAPI 0x13 /* telephone api data (ATD) */
-/* 0x14..0x3F reserved for other protocols */
-#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
-#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
-
-#define AUH_SIZE 1 /* Size of Header Byte */
-
-/* Split Byte. Only present if split bit in header byte set.*/
-#define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */
-#define AUS_FIRST 0x80 /* first block */
-#define AUS_FOLLOW 0x00 /* following block */
-
-#define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */
-#define AUS_END 0x40 /* last block */
-#define AUS_NOEND 0x00 /* not the last block */
-
-#define AUS_LENMASK 0x3F /* mask for block length information */
-
-/* Request types */
-#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
-#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
-
-/* Vendor Requests */
-#define AUV_GETINFO 0x00 /* GetDeviceInfo */
-#define AUV_WBLOCK 0x01 /* Write Block */
-#define AUV_RBLOCK 0x02 /* Read Block */
-#define AUV_CHANNELCTL 0x03 /* Channel Control */
-#define AUV_DUMMY 0x04 /* Dummy Out for retry */
-
-/* Device Info Types */
-#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
-#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
-#define AUDI_MBCTRANS 0x0002 /* max. block length of control transfer */
-
-/* Interrupt endpoint definitions */
-#define AU_IRQENDP 1 /* Endpoint number */
-#define AU_IRQCMDID 16 /* Command-block ID */
-#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
-#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
-
-/* Device String Descriptors */
-#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
-#define AUSI_DEVICE 2 /* Name of the Device */
-#define AUSI_SERIALNR 3 /* Serial Number */
-#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
-
-#define AUSI_DLEN 100 /* Max. Length of Device Description */
-
-#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
-
-/*-------------------------------------------------------------------*/
-/* External data structures / Interface */
-typedef struct
-{
- char __user *buf; /* return buffer for string contents */
- unsigned int bsize; /* size of return buffer */
-} audevinfo_t,*paudevinfo_t;
-
-/* IO controls */
-#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
-#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
-#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
-#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
-#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
-#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
-#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmit channel ready to send */
-/* 'U' 0xF7..0xFF reserved */
-
-/*-------------------------------------------------------------------*/
-/* Internal data structures */
-
-/* ..................................................................*/
-/* urb chain element */
-struct auerchain; /* forward for circular reference */
-typedef struct
-{
- struct auerchain *chain; /* pointer to the chain to which this element belongs */
- struct urb * urbp; /* pointer to attached urb */
- void *context; /* saved URB context */
- usb_complete_t complete; /* saved URB completion function */
- struct list_head list; /* to include element into a list */
-} auerchainelement_t,*pauerchainelement_t;
-
-/* urb chain */
-typedef struct auerchain
-{
- pauerchainelement_t active; /* element which is submitted to urb */
- spinlock_t lock; /* protection against interrupts */
- struct list_head waiting_list; /* list of waiting elements */
- struct list_head free_list; /* list of available elements */
-} auerchain_t,*pauerchain_t;
-
-/* urb blocking completion helper struct */
-typedef struct
-{
- wait_queue_head_t wqh; /* wait for completion */
- unsigned int done; /* completion flag */
-} auerchain_chs_t,*pauerchain_chs_t;
-
-/* ...................................................................*/
-/* buffer element */
-struct auerbufctl; /* forward */
-typedef struct
-{
- char *bufp; /* reference to allocated data buffer */
- unsigned int len; /* number of characters in data buffer */
- unsigned int retries; /* for urb retries */
- struct usb_ctrlrequest *dr; /* for setup data in control messages */
- struct urb * urbp; /* USB urb */
- struct auerbufctl *list; /* pointer to list */
- struct list_head buff_list; /* reference to next buffer in list */
-} auerbuf_t,*pauerbuf_t;
-
-/* buffer list control block */
-typedef struct auerbufctl
-{
- spinlock_t lock; /* protection in interrupt */
- struct list_head free_buff_list;/* free buffers */
- struct list_head rec_buff_list; /* buffers with receive data */
-} auerbufctl_t,*pauerbufctl_t;
-
-/* ...................................................................*/
-/* service context */
-struct auerscon; /* forward */
-typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
-typedef void (*auer_disconn_t) (struct auerscon*);
-typedef struct auerscon
-{
- unsigned int id; /* protocol service id AUH_xxxx */
- auer_dispatch_t dispatch; /* dispatch read buffer */
- auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
-} auerscon_t,*pauerscon_t;
-
-/* ...................................................................*/
-/* USB device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- char name[20]; /* name of the /dev/usb entry */
- unsigned int dtindex; /* index in the device table */
- struct usb_device * usbdev; /* USB device handle */
- int open_count; /* count the number of open character channels */
- char dev_desc[AUSI_DLEN];/* for storing a textual description */
- unsigned int maxControlLength; /* max. length of control packet (without header) */
- struct urb * inturbp; /* interrupt urb */
- char * intbufp; /* data buffer for interrupt urb */
- unsigned int irqsize; /* size of interrupt endpoint 1 */
- struct auerchain controlchain; /* for chaining of control messages */
- auerbufctl_t bufctl; /* Buffer control for control transfers */
- pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
- unsigned int version; /* Version of the device */
- wait_queue_head_t bufferwait; /* wait for a control buffer */
-} auerswald_t,*pauerswald_t;
-
-/* ................................................................... */
-/* character device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- pauerswald_t auerdev; /* context pointer of assigned device */
- auerbufctl_t bufctl; /* controls the buffer chain */
- auerscon_t scontext; /* service context */
- wait_queue_head_t readwait; /* for synchronous reading */
- struct mutex readmutex; /* protection against multiple reads */
- pauerbuf_t readbuf; /* buffer held for partial reading */
- unsigned int readoffset; /* current offset in readbuf */
- unsigned int removed; /* is != 0 if device is removed */
-} auerchar_t,*pauerchar_t;
-
-
-/*-------------------------------------------------------------------*/
-/* Forwards */
-static void auerswald_ctrlread_complete (struct urb * urb);
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
-static struct usb_driver auerswald_driver;
-
-
-/*-------------------------------------------------------------------*/
-/* USB chain helper functions */
-/* -------------------------- */
-
-/* completion function for chained urbs */
-static void auerchain_complete (struct urb * urb)
-{
- unsigned long flags;
- int result;
-
- /* get pointer to element and to chain */
- pauerchainelement_t acep = urb->context;
- pauerchain_t acp = acep->chain;
-
- /* restore original entries in urb */
- urb->context = acep->context;
- urb->complete = acep->complete;
-
- dbg ("auerchain_complete called");
-
- /* call original completion function
- NOTE: this function may lead to more urbs submitted into the chain.
- (no chain lock is held while calling complete()!)
- acp->active != NULL protects us against recursion.*/
- urb->complete (urb);
-
- /* detach element from chain data structure */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active != acep) /* paranoia debug check */
- dbg ("auerchain_complete: completion on non-active element called!");
- else
- acp->active = NULL;
-
- /* add the used chain element to the list of free elements */
- list_add_tail (&acep->list, &acp->free_list);
- acep = NULL;
-
- /* is there a new element waiting in the chain? */
- if (!acp->active && !list_empty (&acp->waiting_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- acp->active = acep;
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* submit the new urb */
- if (acep) {
- urb = acep->urbp;
- dbg ("auerchain_complete: submitting next urb from chain");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
-
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_complete: usb_submit_urb with error code %d", result);
- /* and do error handling via *this* completion function (recursive) */
- auerchain_complete( urb);
- }
- } else {
- /* simple return without submitting a new urb.
- The empty chain is detected with acp->active == NULL. */
- };
-}
-
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
- early = 1 -> submit in front of chain
-*/
-static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
-{
- int result;
- unsigned long flags;
- pauerchainelement_t acep = NULL;
-
- dbg ("auerchain_submit_urb called");
-
- /* try to get a chain element */
- spin_lock_irqsave (&acp->lock, flags);
- if (!list_empty (&acp->free_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* if no chain element available: return with error */
- if (!acep) {
- return -ENOMEM;
- }
-
- /* fill in the new chain element values */
- acep->chain = acp;
- acep->context = urb->context;
- acep->complete = urb->complete;
- acep->urbp = urb;
- INIT_LIST_HEAD (&acep->list);
-
- /* modify urb */
- urb->context = acep;
- urb->complete = auerchain_complete;
- urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */
-
- /* add element to chain - or start it immediately */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active) {
- /* there is traffic in the chain, simple add element to chain */
- if (early) {
- dbg ("adding new urb to head of chain");
- list_add (&acep->list, &acp->waiting_list);
- } else {
- dbg ("adding new urb to end of chain");
- list_add_tail (&acep->list, &acp->waiting_list);
- }
- acep = NULL;
- } else {
- /* the chain is empty. Prepare restart */
- acp->active = acep;
- }
- /* The spinlock must be released before usb_submit_urb! */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* Submit urb if immediate restart */
- if (acep) {
- dbg("submitting urb immediate");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
- /* and do error handling via completion function */
- auerchain_complete( urb);
- }
- }
-
- return 0;
-}
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
-*/
-static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
-{
- return auerchain_submit_urb_list (acp, urb, 0);
-}
-
-/* cancel an urb which is submitted to the chain
- the result is 0 if the urb is cancelled, or -EINPROGRESS if
- the function is successfully started.
-*/
-static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
- struct list_head *tmp;
-
- dbg ("auerchain_unlink_urb called");
-
- /* search the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- list_for_each (tmp, &acp->waiting_list) {
- acep = list_entry (tmp, auerchainelement_t, list);
- if (acep->urbp == urb) {
- list_del (tmp);
- urb->context = acep->context;
- urb->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urb->status = -ENOENT;
- urb->complete (urb);
- return 0;
- }
- }
- /* not found. */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* get the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
-
- /* check if we have to cancel the active urb */
- if (urbp == urb) {
- /* note that there is a race condition between the check above
- and the unlink() call because no lock is held. This race is harmless,
- because the usb module will detect the unlink() after completion.
- We can't use the acp->lock here because the completion function
- wants to grab it.
- */
- dbg ("unlink active urb");
- return usb_unlink_urb (urbp);
- }
- }
-
- /* not found anyway
- ... is some kind of success
- */
- dbg ("urb to unlink not found in chain");
- return 0;
-}
-
-/* cancel all urbs which are in the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_unlink_all (pauerchain_t acp)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
-
- dbg ("auerchain_unlink_all called");
-
- /* clear the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->waiting_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- urbp = acep->urbp;
- urbp->context = acep->context;
- urbp->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urbp->status = -ENOENT;
- urbp->complete (urbp);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* clear the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
- dbg ("unlink active urb");
- usb_kill_urb (urbp);
- }
-}
-
-
-/* free the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_free (pauerchain_t acp)
-{
- unsigned long flags;
- pauerchainelement_t acep;
-
- dbg ("auerchain_free called");
-
- /* first, cancel all pending urbs */
- auerchain_unlink_all (acp);
-
- /* free the elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- spin_unlock_irqrestore (&acp->lock, flags);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-}
-
-
-/* Init the chain control structure */
-static void auerchain_init (pauerchain_t acp)
-{
- /* init the chain data structure */
- acp->active = NULL;
- spin_lock_init (&acp->lock);
- INIT_LIST_HEAD (&acp->waiting_list);
- INIT_LIST_HEAD (&acp->free_list);
-}
-
-/* setup a chain.
- It is assumed that there is no concurrency while setting up the chain
- prerequisite: auerchain_init() must have been called
-*/
-static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
-{
- pauerchainelement_t acep;
-
- dbg ("auerchain_setup called with %d elements", numElements);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
- if (!acep)
- goto ac_fail;
- INIT_LIST_HEAD (&acep->list);
- list_add_tail (&acep->list, &acp->free_list);
- }
- return 0;
-
-ac_fail:/* free the elements */
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- }
- return -ENOMEM;
-}
-
-
-/* completion handler for synchronous chained URBs */
-static void auerchain_blocking_completion (struct urb *urb)
-{
- pauerchain_chs_t pchs = urb->context;
- pchs->done = 1;
- wmb();
- wake_up (&pchs->wqh);
-}
-
-
-/* Starts chained urb and waits for completion or timeout */
-static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
-{
- auerchain_chs_t chs;
- int status;
-
- dbg ("auerchain_start_wait_urb called");
- init_waitqueue_head (&chs.wqh);
- chs.done = 0;
-
- urb->context = &chs;
- status = auerchain_submit_urb (acp, urb);
- if (status)
- /* something went wrong */
- return status;
-
- timeout = wait_event_timeout(chs.wqh, chs.done, timeout);
-
- if (!timeout && !chs.done) {
- if (urb->status != -EINPROGRESS) { /* No callback?!! */
- dbg ("auerchain_start_wait_urb: raced timeout");
- status = urb->status;
- } else {
- dbg ("auerchain_start_wait_urb: timeout");
- auerchain_unlink_urb (acp, urb); /* remove urb safely */
- status = -ETIMEDOUT;
- }
- } else
- status = urb->status;
-
- if (status >= 0)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-
-/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
- acp: pointer to the auerchain
- dev: pointer to the usb device to send the message to
- pipe: endpoint "pipe" to send the message to
- request: USB message request value
- requesttype: USB message request type value
- value: USB message value
- index: USB message index value
- data: pointer to the data to send
- size: length in bytes of the data to send
- timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
-
- This function sends a simple control message to a specified endpoint
- and waits for the message to complete, or timeout.
-
- If successful, it returns the transferred length, otherwise a negative error number.
-
- Don't use this function from within an interrupt context, like a
- bottom half handler. If you need an asynchronous message, or need to send
- a message from within interrupt context, use auerchain_submit_urb()
-*/
-static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
- __u16 value, __u16 index, void *data, __u16 size, int timeout)
-{
- int ret;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- int uninitialized_var(length);
-
- dbg ("auerchain_control_msg");
- dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
- urb = usb_alloc_urb (0, GFP_KERNEL);
- if (!urb) {
- kfree (dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16 (value);
- dr->wIndex = cpu_to_le16 (index);
- dr->wLength = cpu_to_le16 (size);
-
- usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
- auerchain_blocking_completion, NULL);
- ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
-
- usb_free_urb (urb);
- kfree (dr);
-
- if (ret < 0)
- return ret;
- else
- return length;
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Buffer List helper functions */
-
-/* free a single auerbuf */
-static void auerbuf_free (pauerbuf_t bp)
-{
- kfree(bp->bufp);
- kfree(bp->dr);
- usb_free_urb(bp->urbp);
- kfree(bp);
-}
-
-/* free the buffers from an auerbuf list */
-static void auerbuf_free_list (struct list_head *q)
-{
- struct list_head *tmp;
- struct list_head *p;
- pauerbuf_t bp;
-
- dbg ("auerbuf_free_list");
- for (p = q->next; p != q;) {
- bp = list_entry (p, auerbuf_t, buff_list);
- tmp = p->next;
- list_del (p);
- p = tmp;
- auerbuf_free (bp);
- }
-}
-
-/* init the members of a list control block */
-static void auerbuf_init (pauerbufctl_t bcp)
-{
- dbg ("auerbuf_init");
- spin_lock_init (&bcp->lock);
- INIT_LIST_HEAD (&bcp->free_buff_list);
- INIT_LIST_HEAD (&bcp->rec_buff_list);
-}
-
-/* free all buffers from an auerbuf chain */
-static void auerbuf_free_buffers (pauerbufctl_t bcp)
-{
- unsigned long flags;
- dbg ("auerbuf_free_buffers");
-
- spin_lock_irqsave (&bcp->lock, flags);
-
- auerbuf_free_list (&bcp->free_buff_list);
- auerbuf_free_list (&bcp->rec_buff_list);
-
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-/* setup a list of buffers */
-/* requirement: auerbuf_init() */
-static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
-{
- pauerbuf_t bep = NULL;
-
- dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
- if (!bep)
- goto bl_fail;
- bep->list = bcp;
- INIT_LIST_HEAD (&bep->buff_list);
- bep->bufp = kmalloc (bufsize, GFP_KERNEL);
- if (!bep->bufp)
- goto bl_fail;
- bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!bep->dr)
- goto bl_fail;
- bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!bep->urbp)
- goto bl_fail;
- list_add_tail (&bep->buff_list, &bcp->free_buff_list);
- }
- return 0;
-
-bl_fail:/* not enough memory. Free allocated elements */
- dbg ("auerbuf_setup: no more memory");
- auerbuf_free(bep);
- auerbuf_free_buffers (bcp);
- return -ENOMEM;
-}
-
-/* insert a used buffer into the free list */
-static void auerbuf_releasebuf( pauerbuf_t bp)
-{
- unsigned long flags;
- pauerbufctl_t bcp = bp->list;
- bp->retries = 0;
-
- dbg ("auerbuf_releasebuf called");
- spin_lock_irqsave (&bcp->lock, flags);
- list_add_tail (&bp->buff_list, &bcp->free_buff_list);
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-
-/*-------------------------------------------------------------------*/
-/* Completion handlers */
-
-/* Values of urb->status or results of usb_submit_urb():
-0 Initial, OK
--EINPROGRESS during submission until end
--ENOENT if urb is unlinked
--ETIME Device did not respond
--ENOMEM Memory Overflow
--ENODEV Specified USB-device or bus doesn't exist
--ENXIO URB already queued
--EINVAL a) Invalid transfer type specified (or not supported)
- b) Invalid interrupt interval (0 < n < 256)
--EAGAIN a) Specified ISO start frame too early
- b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again.
--EFBIG Too many ISO frames requested (currently uhci > 900)
--EPIPE Specified pipe-handle/Endpoint is already stalled
--EMSGSIZE Endpoint message size is zero, do interface/alternate setting
--EPROTO a) Bitstuff error
- b) Unknown USB error
--EILSEQ CRC mismatch
--ENOSR Buffer error
--EREMOTEIO Short packet detected
--EXDEV ISO transfer only partially completed; look at individual frame status for details
--EINVAL ISO madness, if this happens: Log off and go home
--EOVERFLOW babble
-*/
-
-/* check if a status code allows a retry */
-static int auerswald_status_retry (int status)
-{
- switch (status) {
- case 0:
- case -ETIME:
- case -EOVERFLOW:
- case -EAGAIN:
- case -EPIPE:
- case -EPROTO:
- case -EILSEQ:
- case -ENOSR:
- case -EREMOTEIO:
- return 1; /* do a retry */
- }
- return 0; /* no retry possible */
-}
-
-/* Completion of asynchronous write block */
-static void auerchar_ctrlwrite_complete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
- dbg ("auerchar_ctrlwrite_complete called");
-
- /* reuse the buffer */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/* Completion handler for dummy retry packet */
-static void auerswald_ctrlread_wretcomplete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp;
- int ret;
- int status = urb->status;
-
- dbg ("auerswald_ctrlread_wretcomplete called");
- dbg ("complete with status: %d", status);
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if it is possible to advance */
- if (!auerswald_status_retry(status) || !cp->usbdev) {
- /* reuse the buffer */
- err ("control dummy: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wLength = bp->dr->wValue; /* temporarily stored */
- bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
- /* bp->dr->index = channel id; remains */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg as next packet */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete (bp->urbp);
- }
-}
-
-/* completion handler for receiving of control messages */
-static void auerswald_ctrlread_complete (struct urb * urb)
-{
- unsigned int serviceid;
- pauerswald_t cp;
- pauerscon_t scp;
- pauerbuf_t bp = urb->context;
- int status = urb->status;
- int ret;
-
- dbg ("auerswald_ctrlread_complete called");
-
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if there is valid data in this urb */
- if (status) {
- dbg ("complete with non-zero status: %d", status);
- /* should we do a retry? */
- if (!auerswald_status_retry(status)
- || !cp->usbdev
- || (cp->version < AUV_RETRY)
- || (bp->retries >= AU_RETRIES)) {
- /* reuse the buffer */
- err ("control read: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
- bp->retries++;
- dbg ("Retry count = %d", bp->retries);
- /* send a long dummy control-write-message to allow device firmware to react */
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_DUMMY;
- bp->dr->wValue = bp->dr->wLength; /* temporary storage */
- // bp->dr->wIndex channel ID remains
- bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, 32,
- auerswald_ctrlread_wretcomplete,bp);
-
- /* submit the control msg as next packet */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_wretcomplete (bp->urbp);
- }
- return;
- }
-
- /* get the actual byte count (incl. header byte) */
- bp->len = urb->actual_length;
- serviceid = bp->bufp[0] & AUH_TYPEMASK;
- dbg ("Packet with serviceid %d and %d bytes received", serviceid, bp->len);
-
- /* dispatch the packet */
- scp = cp->services[serviceid];
- if (scp) {
- /* look, Ma, a listener! */
- scp->dispatch (scp, bp);
- }
-
- /* release the packet */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/*-------------------------------------------------------------------*/
-/* Handling of Interrupt Endpoint */
-/* This interrupt Endpoint is used to inform the host about waiting
- messages from the USB device.
-*/
-/* int completion handler. */
-static void auerswald_int_complete (struct urb * urb)
-{
- unsigned long flags;
- unsigned int channelid;
- unsigned int bytecount;
- int ret;
- int status = urb->status;
- pauerbuf_t bp = NULL;
- pauerswald_t cp = urb->context;
-
- dbg ("%s called", __func__);
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, status);
- goto exit;
- }
-
- /* check if all needed data was received */
- if (urb->actual_length < AU_IRQMINSIZE) {
- dbg ("invalid data length received: %d bytes", urb->actual_length);
- goto exit;
- }
-
- /* check the command code */
- if (cp->intbufp[0] != AU_IRQCMDID) {
- dbg ("invalid command received: %d", cp->intbufp[0]);
- goto exit;
- }
-
- /* check the command type */
- if (cp->intbufp[1] != AU_BLOCKRDY) {
- dbg ("invalid command type received: %d", cp->intbufp[1]);
- goto exit;
- }
-
- /* now extract the information */
- channelid = cp->intbufp[2];
- bytecount = (unsigned char)cp->intbufp[3];
- bytecount |= (unsigned char)cp->intbufp[4] << 8;
-
- /* check the channel id */
- if (channelid >= AUH_TYPESIZE) {
- dbg ("invalid channel id received: %d", channelid);
- goto exit;
- }
-
- /* check the byte count */
- if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
- dbg ("invalid byte count received: %d", bytecount);
- goto exit;
- }
- dbg ("Service Channel = %d", channelid);
- dbg ("Byte Count = %d", bytecount);
-
- /* get a buffer for the next data packet */
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* if no buffer available: skip it */
- if (!bp) {
- dbg ("auerswald_int_complete: no data buffer available");
- /* can we do something more?
- This is a big problem: if this int packet is ignored, the
- device will wait forever and not signal any more data.
- The only real solution is: having enough buffers!
- Or perhaps temporarily disabling the int endpoint?
- */
- goto exit;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (bytecount);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, bytecount,
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- if (ret) {
- dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete( bp->urbp);
- /* here applies the same problem as above: device locking! */
- }
-exit:
- ret = usb_submit_urb (urb, GFP_ATOMIC);
- if (ret)
- err ("%s - usb_submit_urb failed with result %d",
- __func__, ret);
-}
-
-/* int memory deallocation
- NOTE: no mutex please!
-*/
-static void auerswald_int_free (pauerswald_t cp)
-{
- if (cp->inturbp) {
- usb_free_urb(cp->inturbp);
- cp->inturbp = NULL;
- }
- kfree(cp->intbufp);
- cp->intbufp = NULL;
-}
-
-/* This function is called to activate the interrupt
- endpoint. This function returns 0 if successful or an error code.
- NOTE: no mutex please!
-*/
-static int auerswald_int_open (pauerswald_t cp)
-{
- int ret;
- struct usb_host_endpoint *ep;
- int irqsize;
- dbg ("auerswald_int_open");
-
- ep = cp->usbdev->ep_in[AU_IRQENDP];
- if (!ep) {
- ret = -EFAULT;
- goto intoend;
- }
- irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
- cp->irqsize = irqsize;
-
- /* allocate the urb and data buffer */
- if (!cp->inturbp) {
- cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!cp->inturbp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- if (!cp->intbufp) {
- cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
- if (!cp->intbufp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- /* setup urb */
- usb_fill_int_urb (cp->inturbp, cp->usbdev,
- usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
- irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
- /* start the urb */
- cp->inturbp->status = 0; /* needed! */
- ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);
-
-intoend:
- if (ret < 0) {
- /* activation of interrupt endpoint has failed. Now clean up. */
- dbg ("auerswald_int_open: activation of int endpoint failed");
-
- /* deallocate memory */
- auerswald_int_free (cp);
- }
- return ret;
-}
-
-/* This function is called to deactivate the interrupt
- endpoint.
- NOTE: no mutex please!
-*/
-static void auerswald_int_release (pauerswald_t cp)
-{
- dbg ("auerswald_int_release");
-
- /* stop the int endpoint */
- usb_kill_urb (cp->inturbp);
-
- /* deallocate memory */
- auerswald_int_free (cp);
-}
-
-/* --------------------------------------------------------------------- */
-/* Helper functions */
-
-/* wake up waiting readers */
-static void auerchar_disconnect (pauerscon_t scp)
-{
- pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
- dbg ("auerchar_disconnect called");
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-}
-
-
-/* dispatch a read packet to a waiting character device */
-static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
-{
- unsigned long flags;
- pauerchar_t ccp;
- pauerbuf_t newbp = NULL;
- char * charp;
- dbg ("auerchar_ctrlread_dispatch called");
- ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
-
- /* get a read buffer from character device context */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.free_buff_list.next;
- list_del (tmp);
- newbp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- if (!newbp) {
- dbg ("No read buffer available, discard paket!");
- return; /* no buffer, no dispatch */
- }
-
- /* copy information to new buffer element
- (all buffers have the same length) */
- charp = newbp->bufp;
- newbp->bufp = bp->bufp;
- bp->bufp = charp;
- newbp->len = bp->len;
-
- /* insert new buffer in read list */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
- dbg ("read buffer appended to rec_list");
-
- /* wake up pending synchronous reads */
- wake_up (&ccp->readwait);
-}
-
-
-/* Delete an auerswald driver context */
-static void auerswald_delete( pauerswald_t cp)
-{
- dbg( "auerswald_delete");
- if (cp == NULL)
- return;
-
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-
- /* Cleaning up */
- auerswald_int_release (cp);
- auerchain_free (&cp->controlchain);
- auerbuf_free_buffers (&cp->bufctl);
-
- /* release the memory */
- kfree( cp);
-}
-
-
-/* Delete an auerswald character context */
-static void auerchar_delete( pauerchar_t ccp)
-{
- dbg ("auerchar_delete");
- if (ccp == NULL)
- return;
-
- /* wake up pending synchronous reads */
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-
- /* remove the read buffer */
- if (ccp->readbuf) {
- auerbuf_releasebuf (ccp->readbuf);
- ccp->readbuf = NULL;
- }
-
- /* remove the character buffers */
- auerbuf_free_buffers (&ccp->bufctl);
-
- /* release the memory */
- kfree( ccp);
-}
-
-
-/* add a new service to the device
- scp->id must be set!
- return: 0 if OK, else error code
-*/
-static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
-{
- int ret;
-
- /* is the device available? */
- if (!cp->usbdev) {
- dbg ("usbdev == NULL");
- return -EIO; /*no: can not add a service, sorry*/
- }
-
- /* is the service available? */
- if (cp->services[scp->id]) {
- dbg ("service is busy");
- return -EBUSY;
- }
-
- /* device is available, service is free */
- cp->services[scp->id] = scp;
-
- /* register service in device */
- ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x01, /* open USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
- /* undo above actions */
- cp->services[scp->id] = NULL;
- return ret;
- }
-
- dbg ("auerswald_addservice: channel open OK");
- return 0;
-}
-
-
-/* remove a service from the device
- scp->id must be set! */
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
-{
- dbg ("auerswald_removeservice called");
-
- /* check if we have a service allocated */
- if (scp->id == AUH_UNASSIGNED)
- return;
-
- /* If there is a device: close the channel */
- if (cp->usbdev) {
- /* Close the service channel inside the device */
- int ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x00, // close /* USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
- }
- else {
- dbg ("auerswald_removeservice: channel close OK");
- }
- }
-
- /* remove the service from the device */
- cp->services[scp->id] = NULL;
- scp->id = AUH_UNASSIGNED;
-}
-
-
-/* --------------------------------------------------------------------- */
-/* Char device functions */
-
-/* Open a new character device */
-static int auerchar_open (struct inode *inode, struct file *file)
-{
- int dtindex = iminor(inode);
- pauerswald_t cp = NULL;
- pauerchar_t ccp = NULL;
- struct usb_interface *intf;
- int ret;
-
- /* minor number in range? */
- if (dtindex < 0) {
- return -ENODEV;
- }
- intf = usb_find_interface(&auerswald_driver, dtindex);
- if (!intf) {
- return -ENODEV;
- }
-
- /* usb device available? */
- cp = usb_get_intfdata (intf);
- if (cp == NULL) {
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- return -ERESTARTSYS;
- }
-
- /* we have access to the device. Now lets allocate memory */
- ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
- if (ccp == NULL) {
- err ("out of memory");
- ret = -ENOMEM;
- goto ofail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&ccp->mutex);
- mutex_init(&ccp->readmutex);
- auerbuf_init (&ccp->bufctl);
- ccp->scontext.id = AUH_UNASSIGNED;
- ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
- ccp->scontext.disconnect = auerchar_disconnect;
- init_waitqueue_head (&ccp->readwait);
-
- ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
- if (ret) {
- goto ofail;
- }
-
- cp->open_count++;
- ccp->auerdev = cp;
- dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
- mutex_unlock(&cp->mutex);
-
- /* file IO stuff */
- file->f_pos = 0;
- file->private_data = ccp;
- return nonseekable_open(inode, file);
-
- /* Error exit */
-ofail: mutex_unlock(&cp->mutex);
- auerchar_delete (ccp);
- return ret;
-}
-
-
-/* IOCTL functions */
-static long auerchar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- int ret = 0;
- audevinfo_t devinfo;
- pauerswald_t cp = NULL;
- unsigned int u;
- unsigned int __user *user_arg = (unsigned int __user *)arg;
-
- dbg ("ioctl");
-
- /* get the mutexes */
- if (mutex_lock_interruptible(&ccp->mutex)) {
- return -ERESTARTSYS;
- }
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* Check for removal */
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- lock_kernel();
- switch (cmd) {
-
- /* return != 0 if Transmit channel ready to send */
- case IOCTL_AU_TXREADY:
- dbg ("IOCTL_AU_TXREADY");
- u = ccp->auerdev
- && (ccp->scontext.id != AUH_UNASSIGNED)
- && !list_empty (&cp->bufctl.free_buff_list);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if connected to a service channel */
- case IOCTL_AU_CONNECT:
- dbg ("IOCTL_AU_CONNECT");
- u = (ccp->scontext.id != AUH_UNASSIGNED);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if Receive Data available */
- case IOCTL_AU_RXAVAIL:
- dbg ("IOCTL_AU_RXAVAIL");
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- ret = -EIO;
- break;
- }
- u = 0; /* no data */
- if (ccp->readbuf) {
- int restlen = ccp->readbuf->len - ccp->readoffset;
- if (restlen > 0)
- u = 1;
- }
- if (!u) {
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- u = 1;
- }
- }
- ret = put_user (u, user_arg);
- break;
-
- /* return the max. buffer length for the device */
- case IOCTL_AU_BUFLEN:
- dbg ("IOCTL_AU_BUFLEN");
- u = cp->maxControlLength;
- ret = put_user (u, user_arg);
- break;
-
- /* requesting a service channel */
- case IOCTL_AU_SERVREQ:
- dbg ("IOCTL_AU_SERVREQ");
- /* requesting a service means: release the previous one first */
- auerswald_removeservice (cp, &ccp->scontext);
- /* get the channel number */
- ret = get_user (u, user_arg);
- if (ret) {
- break;
- }
- if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
- ret = -EIO;
- break;
- }
- dbg ("auerchar service request parameters are ok");
- ccp->scontext.id = u;
-
- /* request the service now */
- ret = auerswald_addservice (cp, &ccp->scontext);
- if (ret) {
- /* no: revert service entry */
- ccp->scontext.id = AUH_UNASSIGNED;
- }
- break;
-
- /* get a string descriptor for the device */
- case IOCTL_AU_DEVINFO:
- dbg ("IOCTL_AU_DEVINFO");
- if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
- ret = -EFAULT;
- break;
- }
- u = strlen(cp->dev_desc)+1;
- if (u > devinfo.bsize) {
- u = devinfo.bsize;
- }
- ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
- break;
-
- /* get the max. string descriptor length */
- case IOCTL_AU_SLEN:
- dbg ("IOCTL_AU_SLEN");
- u = AUSI_DLEN;
- ret = put_user (u, user_arg);
- break;
-
- default:
- dbg ("IOCTL_AU_UNKNOWN");
- ret = -ENOTTY;
- break;
- }
- unlock_kernel();
- /* release the mutexes */
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return ret;
-}
-
-/* Read data from the device */
-static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
-{
- unsigned long flags;
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerbuf_t bp = NULL;
- wait_queue_t wait;
-
- dbg ("auerchar_read");
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (count == 0)
- return 0;
-
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to read something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- /* only one reader per device allowed */
- if (mutex_lock_interruptible(&ccp->readmutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* read data from readbuf, if available */
-doreadbuf:
- bp = ccp->readbuf;
- if (bp) {
- /* read the maximum bytes */
- int restlen = bp->len - ccp->readoffset;
- if (restlen < 0)
- restlen = 0;
- if (count > restlen)
- count = restlen;
- if (count) {
- if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
- dbg ("auerswald_read: copy_to_user failed");
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
- }
- /* advance the read offset */
- ccp->readoffset += count;
- restlen -= count;
- // reuse the read buffer
- if (restlen <= 0) {
- auerbuf_releasebuf (bp);
- ccp->readbuf = NULL;
- }
- /* return with number of bytes read */
- if (count) {
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return count;
- }
- }
-
- /* a read buffer is not available. Try to get the next data block. */
-doreadlist:
- /* Preparing for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ccp->readwait, &wait);
-
- bp = NULL;
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- /* have we got data? */
- if (bp) {
- ccp->readbuf = bp;
- ccp->readoffset = AUH_SIZE; /* for headerbyte */
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- goto doreadbuf; /* now we can read! */
- }
-
- /* no data available. Should we wait? */
- if (file->f_flags & O_NONBLOCK) {
- dbg ("No read buffer available, returning -EAGAIN");
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EAGAIN; /* nonblocking, no data available */
- }
-
- /* yes, we should wait! */
- mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
- schedule();
- remove_wait_queue (&ccp->readwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* Anything left to read? */
- if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
- mutex_unlock(&ccp->readmutex);
- return -EIO;
- }
-
- if (mutex_lock_interruptible(&ccp->mutex)) {
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* try to read the incoming data again */
- goto doreadlist;
-}
-
-
-/* Write a data block into the right service channel of the device */
-static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp = NULL;
- pauerbuf_t bp;
- unsigned long flags;
- int ret;
- wait_queue_t wait;
-
- dbg ("auerchar_write %zd bytes", len);
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (len == 0)
- return 0;
-
-write_again:
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to write something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- /* Prepare for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&cp->bufferwait, &wait);
-
- /* Try to get a buffer from the device pool.
- We can't use a buffer from ccp->bufctl because the write
- command will last beyond a release() */
- bp = NULL;
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* are there any buffers left? */
- if (!bp) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
-
- /* NONBLOCK: don't wait */
- if (file->f_flags & O_NONBLOCK) {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- return -EAGAIN;
- }
-
- /* BLOCKING: wait */
- schedule();
- remove_wait_queue (&cp->bufferwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- return -ERESTARTSYS;
- }
- goto write_again;
- } else {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- }
-
- /* protect against too big write requests */
- if (len > cp->maxControlLength)
- len = cp->maxControlLength;
-
- /* Fill the buffer */
- if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
- dbg ("copy_from_user failed");
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
-
- /* set the header byte */
- *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
-
- /* Set the transfer Parameters */
- bp->len = len+AUH_SIZE;
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_WBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
- auerchar_ctrlwrite_complete, bp);
- /* up we go */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- mutex_unlock(&cp->mutex);
- if (ret) {
- dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- else {
- dbg ("auerchar_write: Write OK");
- mutex_unlock(&ccp->mutex);
- return len;
- }
-}
-
-
-/* Close a character device */
-static int auerchar_release (struct inode *inode, struct file *file)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp;
- dbg("release");
-
- mutex_lock(&ccp->mutex);
- cp = ccp->auerdev;
- if (cp) {
- mutex_lock(&cp->mutex);
- /* remove an open service */
- auerswald_removeservice (cp, &ccp->scontext);
- /* detach from device */
- if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
- /* usb device waits for removal */
- mutex_unlock(&cp->mutex);
- auerswald_delete (cp);
- } else {
- mutex_unlock(&cp->mutex);
- }
- cp = NULL;
- ccp->auerdev = NULL;
- }
- mutex_unlock(&ccp->mutex);
- auerchar_delete (ccp);
-
- return 0;
-}
-
-
-/*----------------------------------------------------------------------*/
-/* File operation structure */
-static const struct file_operations auerswald_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = auerchar_read,
- .write = auerchar_write,
- .unlocked_ioctl = auerchar_ioctl,
- .open = auerchar_open,
- .release = auerchar_release,
-};
-
-static struct usb_class_driver auerswald_class = {
- .name = "auer%d",
- .fops = &auerswald_fops,
- .minor_base = AUER_MINOR_BASE,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Special USB driver functions */
-
-/* Probe if this driver wants to serve a USB device
-
- This entry point is called whenever a new device is attached to the bus.
- Then the device driver has to create a new instance of its internal data
- structures for the new device.
-
- The dev argument specifies the device context, which contains pointers
- to all USB descriptors. The interface argument specifies the interface
- number. If a USB driver wants to bind itself to a particular device and
- interface, it has to return a pointer. This pointer normally references
- the device driver's context structure.
-
- Probing is normally done by checking the vendor and product identifications
- or the class and subclass definitions. If they match, the interface number
- is compared with the ones supported by the driver. When probing is done
- class-based, it might be necessary to parse some more USB descriptors because
- the device properties can vary over a wide range.
-*/
-static int auerswald_probe (struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *usbdev = interface_to_usbdev(intf);
- pauerswald_t cp = NULL;
- unsigned int u = 0;
- __le16 *pbuf;
- int ret;
-
- dbg ("probe: vendor id 0x%x, device id 0x%x",
- le16_to_cpu(usbdev->descriptor.idVendor),
- le16_to_cpu(usbdev->descriptor.idProduct));
-
- /* we use only the first -and only- interface */
- if (intf->altsetting->desc.bInterfaceNumber != 0)
- return -ENODEV;
-
- /* allocate memory for our device and initialize it */
- cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
- if (cp == NULL) {
- err ("out of memory");
- goto pfail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&cp->mutex);
- cp->usbdev = usbdev;
- auerchain_init (&cp->controlchain);
- auerbuf_init (&cp->bufctl);
- init_waitqueue_head (&cp->bufferwait);
-
- ret = usb_register_dev(intf, &auerswald_class);
- if (ret) {
- err ("Not able to get a minor for this device.");
- goto pfail;
- }
-
- /* Give the device a name */
- sprintf (cp->name, "usb/auer%d", intf->minor);
-
- /* Store the index */
- cp->dtindex = intf->minor;
-
- /* Get the usb version of the device */
- cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
- dbg ("Version is %X", cp->version);
-
- /* allow the device some time to settle */
- msleep(334);
-
- /* Try to get a suitable textual description of the device */
- /* Device name:*/
- ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
- if (ret >= 0) {
- u += ret;
- /* Append Serial Number */
- memcpy(&cp->dev_desc[u], ",Ser# ", 6);
- u += 6;
- ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- /* Append subscriber number */
- memcpy(&cp->dev_desc[u], ", ", 2);
- u += 2;
- ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- }
- }
- }
- cp->dev_desc[u] = '\0';
- info("device is a %s", cp->dev_desc);
-
- /* get the maximum allowed control transfer length */
- pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
- if (!pbuf) {
- err( "out of memory");
- goto pfail;
- }
- ret = usb_control_msg(cp->usbdev, /* pointer to device */
- usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */
- AUV_GETINFO, /* USB message request value */
- AUT_RREQ, /* USB message request type value */
- 0, /* USB message value */
- AUDI_MBCTRANS, /* USB message index value */
- pbuf, /* pointer to the receive buffer */
- 2, /* length of the buffer */
- 2000); /* time to wait for the message to complete before timing out */
- if (ret == 2) {
- cp->maxControlLength = le16_to_cpup(pbuf);
- kfree(pbuf);
- dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
- } else {
- kfree(pbuf);
- err("setup: getting max. allowed control transfer length failed with error %d", ret);
- goto pfail;
- }
-
- /* allocate a chain for the control messages */
- if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* allocate buffers for control messages */
- if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* start the interrupt endpoint */
- if (auerswald_int_open (cp)) {
- err ("int endpoint failed");
- goto pfail;
- }
-
- /* all OK */
- usb_set_intfdata (intf, cp);
- return 0;
-
- /* Error exit: clean up the memory */
-pfail: auerswald_delete (cp);
- return -EIO;
-}
-
-
-/* Disconnect driver from a served device
-
- This function is called whenever a device which was served by this driver
- is disconnected.
-
- The argument dev specifies the device context; driver_context is the
- pointer to the driver context previously registered by the probe
- function. After returning from the disconnect function, the USB
- framework completely deallocates all data structures associated with
- this device. In particular, the usb_device structure must not be used
- any longer by the usb driver.
-*/
-static void auerswald_disconnect (struct usb_interface *intf)
-{
- pauerswald_t cp = usb_get_intfdata (intf);
- unsigned int u;
-
- usb_set_intfdata (intf, NULL);
- if (!cp)
- return;
-
- /* give back our USB minor number */
- usb_deregister_dev(intf, &auerswald_class);
-
- mutex_lock(&cp->mutex);
- info ("device /dev/%s now disconnecting", cp->name);
-
- /* Stop the interrupt endpoint */
- auerswald_int_release (cp);
-
- /* remove the control chain allocated in auerswald_probe
- This has the benefit of
- a) all pending (a)synchronous urbs are unlinked
- b) all buffers dealing with urbs are reclaimed
- */
- auerchain_free (&cp->controlchain);
-
- if (cp->open_count == 0) {
- /* nobody is using this device. So we can clean up now */
- mutex_unlock(&cp->mutex);
- /* mutex_unlock() is possible here because no other task
- can open the device (see above). I don't want
- to kfree() a locked mutex. */
-
- auerswald_delete (cp);
- } else {
- /* device is used. Remove the pointer to the
- usb device (it's not valid any more). The last
- release() will do the clean up */
- cp->usbdev = NULL;
- mutex_unlock(&cp->mutex);
- /* Terminate waiting writers */
- wake_up (&cp->bufferwait);
- /* Inform all waiting readers */
- for ( u = 0; u < AUH_TYPESIZE; u++) {
- pauerscon_t scp = cp->services[u];
- if (scp)
- scp->disconnect( scp);
- }
- }
-}
-
-/* Descriptor for the devices which are served by this driver.
- NOTE: this struct is parsed by the usbmanager install scripts.
- Don't change without caution!
-*/
-static struct usb_device_id auerswald_ids [] = {
- { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */
- { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */
- { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */
- { } /* Terminating entry */
-};
-
-/* Standard module device table */
-MODULE_DEVICE_TABLE (usb, auerswald_ids);
-
-/* Standard usb driver struct */
-static struct usb_driver auerswald_driver = {
- .name = "auerswald",
- .probe = auerswald_probe,
- .disconnect = auerswald_disconnect,
- .id_table = auerswald_ids,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Module loading/unloading */
-
-/* Driver initialisation. Called after module loading.
- NOTE: there is no concurrency at _init
-*/
-static int __init auerswald_init (void)
-{
- int result;
- dbg ("init");
-
- /* register driver at the USB subsystem */
- result = usb_register (&auerswald_driver);
- if (result < 0) {
- err ("driver could not be registered");
- return -1;
- }
- return 0;
-}
-
-/* Driver deinit. Called before module removal.
- NOTE: there is no concurrency at _cleanup
-*/
-static void __exit auerswald_cleanup (void)
-{
- dbg ("cleanup");
- usb_deregister (&auerswald_driver);
-}
-
-/* --------------------------------------------------------------------- */
-/* Linux device driver module description */
-
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_DESC);
-MODULE_LICENSE ("GPL");
-
-module_init (auerswald_init);
-module_exit (auerswald_cleanup);
-
-/* --------------------------------------------------------------------- */
-
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e6ca997..a4ef77e 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/poll.h>
-#include <linux/version.h>
#include <linux/usb/iowarrior.h>
/* Version Information */
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index d94aa73..b897f65 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -48,7 +48,8 @@ static int isight_firmware_load(struct usb_interface *intf,
if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
printk(KERN_ERR "Unable to load isight firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
ptr = firmware->data;
@@ -91,7 +92,6 @@ static int isight_firmware_load(struct usb_interface *intf,
buf, llen, 300) != llen) {
printk(KERN_ERR
"Failed to load isight firmware\n");
- kfree(buf);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index fbace41..69c34a5 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3270,6 +3270,7 @@ static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
{ USB_DEVICE(0x0711, 0x0902) },
+ { USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 0000000..a001748
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,175 @@
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+ depends on !USB && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+ depends on (USB || USB_GADGET) && HAVE_CLK
+ select TWL4030_USB if MACH_OMAP_3430SDP
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+ configure options to match your silicon and the board
+ it's being used with, including the USB peripheral role,
+ or the USB host role, or both.
+
+ Texas Instruments parts using this IP include DaVinci 644x,
+ OMAP 243x, OMAP 343x, and TUSB 6010.
+
+ If you do not know what this is, please say N.
+
+ To compile this driver as a module, choose M here; the
+ module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+ boolean
+ depends on USB_MUSB_HDRC
+ default y if ARCH_DAVINCI
+ default y if ARCH_OMAP2430
+ default y if ARCH_OMAP34XX
+ help
+ Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+ controller is configured (endpoints, mechanisms, etc) on the
+ current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+ depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP2430
+
+comment "OMAP 343x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+
+config USB_TUSB6010
+ boolean "TUSB 6010 support"
+ depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+ default y
+ help
+ The TUSB 6010 chip, from Texas Instruments, connects a discrete
+ HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+ (a high speed serial link). It can use system-specific external
+ DMA controllers.
+
+choice
+ prompt "Driver Mode"
+ depends on USB_MUSB_HDRC
+ help
+ Dual-Role devices can support both host and peripheral roles,
+	  as well as the special "OTG Device" role, which can switch
+ between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+ depends on USB
+ bool "USB Host"
+ help
+ Say Y here if your system supports the USB host role.
+ If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+ or "Mini-AB" connector, it supports the host role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+ depends on USB_GADGET
+ bool "USB Peripheral (gadget stack)"
+ select USB_GADGET_MUSB_HDRC
+ help
+ Say Y here if your system supports the USB peripheral role.
+ If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+ connector, it supports the peripheral role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+ depends on USB && USB_GADGET && PM && EXPERIMENTAL
+ bool "Both host and peripheral: USB OTG (On The Go) Device"
+ select USB_GADGET_MUSB_HDRC
+ select USB_OTG
+ help
+ The most notable feature of USB OTG is support for a
+ "Dual-Role" device, which can act as either a device
+ or a host. The initial role choice can be changed
+ later, when two dual-role devices talk to each other.
+
+ At this writing, the OTG support in this driver is incomplete,
+ omitting the mandatory HNP or SRP protocols. However, some
+ of the cable based role switching works. (That is, grounding
+ the ID pin switches the controller to host mode, while leaving
+ it floating leaves it in peripheral mode.)
+
+ Select this if your system has a Mini-AB connector, or
+ to simplify certain kinds of configuration.
+
+ To implement your OTG Targeted Peripherals List (TPL), enable
+ USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+ to match your requirements.
+
+endchoice
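+
+# For example, a host-only board would enable USB_MUSB_HDRC plus
+# USB_MUSB_HOST from the choice above; a board with a Mini-AB
+# connector would normally pick USB_MUSB_OTG instead.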
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+# default y
+# select USB_GADGET_DUALSPEED
+# select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+ select USB_OTG if USB_GADGET_MUSB_HDRC
+ default y
+
+
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ depends on USB_MUSB_HDRC
+ default y if USB_TUSB6010
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+	  Do not enable this unless DMA support for your SoC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+config USB_INVENTRA_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_OMAP2430 || ARCH_OMAP34XX
+ help
+ Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_DAVINCI
+ help
+ Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ depends on USB_TUSB6010
+ depends on ARCH_OMAP
+ default y
+ help
+ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config USB_MUSB_DEBUG
+ depends on USB_MUSB_HDRC
+ bool "Enable debugging messages"
+ default n
+ help
+ This enables musb debugging. To set the logging level use the debug
+ module parameter. Starting at level 3, per-transfer (urb, usb_request,
+ packet, or dma transfer) tracing may kick in.
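+
+	  For example, "modprobe musb_hdrc debug=4" enables fairly verbose
+	  logging (illustrative; the exact level-to-detail mapping is
+	  driver-defined).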
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 0000000..b6af0d6
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,69 @@
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := musb_core.o
+
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+ musb_hdrc-objs += davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+ musb_hdrc-objs += tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP2430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP3430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+ musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+ musb_hdrc-objs += musb_virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+
+ ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+ musb_hdrc-objs += musbhsdma.o
+
+ else
+ ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+ musb_hdrc-objs += cppi_dma.o
+
+ else
+ ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+ musb_hdrc-objs += tusb6010_omap.o
+
+ endif
+ endif
+ endif
+endif
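+
+# For example, a DaVinci host-only build with CPPI DMA enabled links
+# musb_core.o, davinci.o, musb_virthub.o, musb_host.o and cppi_dma.o
+# into musb_hdrc.o (or musb_hdrc.ko when built as a module).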
+
+
+################################################################################
+
+# FIXME remove all these extra "-DMUSB_*" flags, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+ EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 0000000..5ad6d08
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ * which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ * evidently after the 1 byte packet was received and acked, the queue
+ * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
+ * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
+ * of its next (512 byte) packet. IRQ issues?
+ *
+ * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD 64
+#define NUM_RXCHAN_BD 64
+
+static inline void cpu_drain_writebuffer(void)
+{
+ wmb();
+#ifdef CONFIG_CPU_ARM926T
+ /* REVISIT this "should not be needed",
+ * but lack of it sure seemed to hurt ...
+ */
+ asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+ struct cppi_descriptor *bd = c->freelist;
+
+ if (bd)
+ c->freelist = bd->next;
+ return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+ if (!bd)
+ return;
+ bd->next = c->freelist;
+ c->freelist = bd;
+}
+
+/*
+ * Start DMA controller
+ *
+ * Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+ musb_writel(&rx->rx_skipbytes, 0, 0);
+ musb_writel(&rx->rx_head, 0, 0);
+ musb_writel(&rx->rx_sop, 0, 0);
+ musb_writel(&rx->rx_current, 0, 0);
+ musb_writel(&rx->rx_buf_current, 0, 0);
+ musb_writel(&rx->rx_len_len, 0, 0);
+ musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+ musb_writel(&tx->tx_head, 0, 0);
+ musb_writel(&tx->tx_buf, 0, 0);
+ musb_writel(&tx->tx_current, 0, 0);
+ musb_writel(&tx->tx_buf_current, 0, 0);
+ musb_writel(&tx->tx_info, 0, 0);
+ musb_writel(&tx->tx_rem_len, 0, 0);
+ /* musb_writel(&tx->tx_dummy, 0, 0); */
+ musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+ int j;
+
+ /* initialize channel fields */
+ c->head = NULL;
+ c->tail = NULL;
+ c->last_processed = NULL;
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = cppi;
+ c->is_rndis = 0;
+ c->freelist = NULL;
+
+ /* build the BD Free list for the channel */
+ for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+ struct cppi_descriptor *bd;
+ dma_addr_t dma;
+
+ bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+ bd->dma = dma;
+ cppi_bd_free(c, bd);
+ }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+ struct cppi *cppi = c->controller;
+ struct cppi_descriptor *bd;
+
+ (void) cppi_channel_abort(&c->channel);
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = NULL;
+
+ /* free all its bds */
+ bd = c->last_processed;
+ do {
+ if (bd)
+ dma_pool_free(cppi->pool, bd, bd->dma);
+ bd = cppi_bd_alloc(c);
+ } while (bd);
+ c->last_processed = NULL;
+}
+
+static int __init cppi_controller_start(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ /* do whatever is necessary to start controller */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ controller->tx[i].transmit = true;
+ controller->tx[i].index = i;
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ controller->rx[i].transmit = false;
+ controller->rx[i].index = i;
+ }
+
+ /* setup BD list on a per channel basis */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+ cppi_pool_init(controller, controller->tx + i);
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_init(controller, controller->rx + i);
+
+ tibase = controller->tibase;
+ INIT_LIST_HEAD(&controller->tx_complete);
+
+ /* initialise tx/rx channel head pointers to zero */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ struct cppi_channel *tx_ch = controller->tx + i;
+ struct cppi_tx_stateram __iomem *tx;
+
+ INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+ tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+ tx_ch->state_ram = tx;
+ cppi_reset_tx(tx, 0);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ struct cppi_channel *rx_ch = controller->rx + i;
+ struct cppi_rx_stateram __iomem *rx;
+
+ INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+ rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+ rx_ch->state_ram = rx;
+ cppi_reset_rx(rx);
+ }
+
+ /* enable individual cppi channels */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ /* enable tx/rx CPPI control */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+ /* disable RNDIS mode, also host rx RNDIS autorequest */
+ musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+
+ return 0;
+}
+
+/*
+ * Stop DMA controller
+ *
+ * De-Init the DMA controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ tibase = controller->tibase;
+ /* DISABLE INDIVIDUAL CHANNEL Interrupts */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ DBG(1, "Tearing down RX and TX Channels\n");
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ /* FIXME restructure of txdma to use bds like rxdma */
+ controller->tx[i].last_processed = NULL;
+		/* FIXME restructure txdma to use bds like rxdma */
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_free(controller->rx + i);
+
+	/* In the TX case proper teardown is supported: we disable TX/RX
+	 * CPPI only after the TX channels are cleaned up, since TX CPPI
+	 * cannot be disabled before TX teardown is complete.
+	 */
+	/* disable tx/rx cppi */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+ return 0;
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side. We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *ep, u8 transmit)
+{
+ struct cppi *controller;
+ u8 index;
+ struct cppi_channel *cppi_ch;
+ void __iomem *tibase;
+
+ controller = container_of(c, struct cppi, controller);
+ tibase = controller->tibase;
+
+ /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+ index = ep->epnum - 1;
+
+ /* return the corresponding CPPI Channel Handle, and
+ * probably disable the non-CPPI irq until we need it.
+ */
+ if (transmit) {
+ if (index >= ARRAY_SIZE(controller->tx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+ return NULL;
+ }
+ cppi_ch = controller->tx + index;
+ } else {
+ if (index >= ARRAY_SIZE(controller->rx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+ return NULL;
+ }
+ cppi_ch = controller->rx + index;
+ core_rxirq_disable(tibase, ep->epnum);
+ }
+
+ /* REVISIT make this an error later once the same driver code works
+ * with the other DMA engine too
+ */
+ if (cppi_ch->hw_ep)
+ DBG(1, "re-allocating DMA%d %cX channel %p\n",
+ index, transmit ? 'T' : 'R', cppi_ch);
+ cppi_ch->hw_ep = ep;
+ cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel. */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+ struct cppi_channel *c;
+ void __iomem *tibase;
+
+ /* REVISIT: for paranoia, check state and abort if needed... */
+
+ c = container_of(channel, struct cppi_channel, channel);
+ tibase = c->controller->tibase;
+ if (!c->hw_ep)
+ DBG(1, "releasing idle DMA channel %p\n", c);
+ else if (!c->transmit)
+ core_rxirq_enable(tibase, c->index + 1);
+
+ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+ c->hw_ep = NULL;
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_rx_stateram __iomem *rx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readl(c->controller->tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+ musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+ musb_readl(&rx->rx_skipbytes, 0),
+ musb_readl(&rx->rx_head, 0),
+ musb_readl(&rx->rx_sop, 0),
+ musb_readl(&rx->rx_current, 0),
+
+ musb_readl(&rx->rx_buf_current, 0),
+ musb_readl(&rx->rx_len_len, 0),
+ musb_readl(&rx->rx_cnt_cnt, 0),
+ musb_readl(&rx->rx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_tx_stateram __iomem *tx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+ musb_readl(&tx->tx_head, 0),
+ musb_readl(&tx->tx_buf, 0),
+ musb_readl(&tx->tx_current, 0),
+ musb_readl(&tx->tx_buf_current, 0),
+
+ musb_readl(&tx->tx_info, 0),
+ musb_readl(&tx->tx_rem_len, 0),
+ /* dummy/unused word 6 */
+ musb_readl(&tx->tx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+ void __iomem *tibase, int is_rndis)
+{
+ /* we may need to change the rndis flag for this cppi channel */
+ if (c->is_rndis != is_rndis) {
+ u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+ u32 temp = 1 << (c->index);
+
+ if (is_rx)
+ temp <<= 16;
+ if (is_rndis)
+ value |= temp;
+ else
+ value &= ~temp;
+ musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+ c->is_rndis = is_rndis;
+ }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+ pr_debug("RXBD/%s %08x: "
+ "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+ tag, bd->dma,
+ bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+ bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+ struct cppi_descriptor *bd;
+
+ if (!_dbg_level(level))
+ return;
+ cppi_dump_rx(level, rx, tag);
+ if (rx->last_processed)
+ cppi_dump_rxbd("last", rx->last_processed);
+ for (bd = rx->head; bd; bd = bd->next)
+ cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+ u32 val;
+
+#ifdef RNDIS_RX_IS_USABLE
+ u32 tmp;
+ /* assert(is_host_active(musb)) */
+
+ /* start from "AutoReq never" */
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ val = tmp & ~((0x3) << (rx->index * 2));
+
+ /* HCD arranged reqpkt for packet #1. we arrange int
+ * for all but the last one, maybe in two segments.
+ */
+ if (!onepacket) {
+#if 0
+ /* use two segments, autoreq "all" then the last "never" */
+ val |= ((0x3) << (rx->index * 2));
+ n_bds--;
+#else
+ /* one segment, autoreq "all-but-last" */
+ val |= ((0x1) << (rx->index * 2));
+#endif
+ }
+
+ if (val != tmp) {
+ int n = 100;
+
+ /* make sure that autoreq is updated before continuing */
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+ do {
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ if (tmp == val)
+ break;
+ cpu_relax();
+ } while (n-- > 0);
+ }
+#endif
+
+ /* REQPKT is turned off after each segment */
+ if (n_bds && rx->channel.actual_len) {
+ void __iomem *regs = rx->hw_ep->regs;
+
+ val = musb_readw(regs, MUSB_RXCSR);
+ if (!(val & MUSB_RXCSR_H_REQPKT)) {
+ val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, val);
+			/* flush write buffer */
+ val = musb_readw(regs, MUSB_RXCSR);
+ }
+ }
+ return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ * - RX builds new queues each time, to help handle routine "early
+ * termination" cases (faults, including errors and short reads)
+ * more correctly.
+ *
+ * - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma). Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths. (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often. RNDIS mode seems to behave too
+ * irq-per-packet mode very often.  RNDIS mode seems to behave well too
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length. It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+ unsigned maxpacket = tx->maxpacket;
+ dma_addr_t addr = tx->buf_dma + tx->offset;
+ size_t length = tx->buf_len - tx->offset;
+ struct cppi_descriptor *bd;
+ unsigned n_bds;
+ unsigned i;
+ struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
+ int rndis;
+
+ /* TX can use the CPPI "rndis" mode, where we can probably fit this
+ * transfer in one BD and one IRQ. The only time we would NOT want
+ * to use it is when hardware constraints prevent it, or if we'd
+ * trigger the "send a ZLP?" confusion.
+ */
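+	/* For example, with 512-byte packets a 3000-byte request (not an
+	 * exact multiple of 512, and 512 is 64-byte aligned) fits one
+	 * RNDIS BD; a 1024-byte request is an exact multiple, so it takes
+	 * the transparent path below.
+	 */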
+ rndis = (maxpacket & 0x3f) == 0
+ && length < 0xffff
+ && (length % maxpacket) != 0;
+
+ if (rndis) {
+ maxpacket = length;
+ n_bds = 1;
+ } else {
+ n_bds = length / maxpacket;
+ if (!length || (length % maxpacket))
+ n_bds++;
+ n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+ length = min(n_bds * maxpacket, length);
+ }
+
+ DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ tx->index,
+ maxpacket,
+ rndis ? "rndis" : "transparent",
+ n_bds,
+ addr, length);
+
+ cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+ /* assuming here that channel_program is called during
+ * transfer initiation ... current code maintains state
+ * for one outstanding request only (no queues, not even
+ * the implicit ones of an iso urb).
+ */
+
+ bd = tx->freelist;
+ tx->head = bd;
+ tx->last_processed = NULL;
+
+ /* FIXME use BD pool like RX side does, and just queue
+ * the minimum number for this request.
+ */
+
+ /* Prepare queue of BDs first, then hand it to hardware.
+ * All BDs except maybe the last should be of full packet
+ * size; for RNDIS there _is_ only that last packet.
+ */
+ for (i = 0; i < n_bds; ) {
+ if (++i < n_bds && bd->next)
+ bd->hw_next = bd->next->dma;
+ else
+ bd->hw_next = 0;
+
+ bd->hw_bufp = tx->buf_dma + tx->offset;
+
+ /* FIXME set EOP only on the last packet,
+ * SOP only on the first ... avoid IRQs
+ */
+ if ((tx->offset + maxpacket) <= tx->buf_len) {
+ tx->offset += maxpacket;
+ bd->hw_off_len = maxpacket;
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | maxpacket;
+ } else {
+ /* only this one may be a partial USB Packet */
+ u32 partial_len;
+
+ partial_len = tx->buf_len - tx->offset;
+ tx->offset = tx->buf_len;
+ bd->hw_off_len = partial_len;
+
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | partial_len;
+ if (partial_len == 0)
+ bd->hw_options |= CPPI_ZERO_SET;
+ }
+
+ DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ /* update the last BD enqueued to the list */
+ tx->tail = bd;
+ bd = bd->next;
+ }
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* Write to the HeadPtr in state RAM to trigger */
+ musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+ cppi_dump_tx(5, tx, "/S");
+}
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes. How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ * (b) loses **BADLY** because nothing (!) happens when that second packet
+ * fills the buffer, much less when a third one arrives. (Which makes this
+ * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
+ * is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ * out to end-of-buffer. Standard PCI host controller DMA descriptors
+ * implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
+ * ignores SOP/EOP markings and processes both of those BDs; so both packets
+ * are loaded into the buffer (with a 212 byte gap between them), and the next
+ * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ * are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ * reliably make both cases work, with software handling both cases correctly
+ * and at the significant penalty of needing an IRQ per packet. (The lack of
+ * I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class. Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ * (a) peripheral mode ... since rndis peripherals could pad their
+ * writes to hosts, causing i/o failure; or we'd have to cope with
+ * a largely unknowable variety of host side protocol variants
+ * (b) and short reads are NOT errors ... since full reads would
+ * cause those same i/o failures
+ * (c) and read length is
+ * - less than 64KB (max per cppi descriptor)
+ * - not a multiple of 4096 (g_zero default, full reads typical)
+ * - N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ * try rx rndis mode
+ *
+ * Cost of heuristic failing: RXDMA wedges at the end of transfers that
+ * fill out the whole buffer. Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled. When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static int cppi_rx_rndis = 1;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
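+/* e.g. "modprobe musb_hdrc cppi_rx_rndis=0" disables the heuristic when
+ * the driver is built as the musb_hdrc module (illustrative usage).
+ */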
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ * performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+ unsigned maxpacket = rx->maxpacket;
+ dma_addr_t addr = rx->buf_dma + rx->offset;
+ size_t length = rx->buf_len - rx->offset;
+ struct cppi_descriptor *bd, *tail;
+ unsigned n_bds;
+ unsigned i;
+ void __iomem *tibase = musb->ctrl_base;
+ int is_rndis = 0;
+ struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+
+ if (onepacket) {
+ /* almost every USB driver, host or peripheral side */
+ n_bds = 1;
+
+ /* maybe apply the heuristic above */
+ if (cppi_rx_rndis
+ && is_peripheral_active(musb)
+ && length > maxpacket
+ && (length & ~0xffff) == 0
+ && (length & 0x0fff) != 0
+ && (length & (maxpacket - 1)) == 0) {
+ maxpacket = length;
+ is_rndis = 1;
+ }
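+		/* For example, a 3072-byte read with 512-byte maxpacket
+		 * matches the heuristic (a multiple of 512 but not of 4096,
+		 * and under 64KB) and becomes a single RNDIS BD; a 4096-byte
+		 * read does not match and stays a one-packet (512-byte)
+		 * segment, at an IRQ per packet.
+		 */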
+ } else {
+ /* virtually nothing except mass storage class */
+ if (length > 0xffff) {
+ n_bds = 0xffff / maxpacket;
+ length = n_bds * maxpacket;
+ } else {
+ n_bds = length / maxpacket;
+ if (length % maxpacket)
+ n_bds++;
+ }
+ if (n_bds == 1)
+ onepacket = 1;
+ else
+ n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+ }
+
+ /* In host mode, autorequest logic can generate some IN tokens; it's
+ * tricky since we can't leave REQPKT set in RXCSR after the transfer
+ * finishes. So: multipacket transfers involve two or more segments.
+ * And always at least two IRQs ... RNDIS mode is not an option.
+ */
+ if (is_host_active(musb))
+ n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+ cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+ length = min(n_bds * maxpacket, length);
+
+ DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%x len %u %u/%u\n",
+ rx->index, maxpacket,
+ onepacket
+ ? (is_rndis ? "rndis" : "onepacket")
+ : "multipacket",
+ n_bds,
+ musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff,
+ addr, length, rx->channel.actual_len, rx->buf_len);
+
+ /* only queue one segment at a time, since the hardware prevents
+ * correct queue shutdown after unexpected short packets
+ */
+ bd = cppi_bd_alloc(rx);
+ rx->head = bd;
+
+ /* Build BDs for all packets in this segment */
+ for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+ u32 bd_len;
+
+ if (i) {
+ bd = cppi_bd_alloc(rx);
+ if (!bd)
+ break;
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+ bd->hw_next = 0;
+
+ /* all but the last packet will be maxpacket size */
+ if (maxpacket < length)
+ bd_len = maxpacket;
+ else
+ bd_len = length;
+
+ bd->hw_bufp = addr;
+ addr += bd_len;
+ rx->offset += bd_len;
+
+ bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+ bd->buflen = bd_len;
+
+ bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
+ length -= bd_len;
+ }
+
+ /* we always expect at least one reusable BD! */
+ if (!tail) {
+ WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+ return;
+ } else if (i < n_bds)
+ WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+ tail->next = NULL;
+ tail->hw_next = 0;
+
+ bd = rx->head;
+ rx->tail = tail;
+
+ /* short reads and other faults should terminate this entire
+ * dma segment. we want one "dma packet" per dma segment, not
+ * one per USB packet, terminating the whole queue at once...
+ * NOTE that current hardware seems to ignore SOP and EOP.
+ */
+ bd->hw_options |= CPPI_SOP_SET;
+ tail->hw_options |= CPPI_EOP_SET;
+
+ if (debug >= 5) {
+ struct cppi_descriptor *d;
+
+ for (d = rx->head; d; d = d->next)
+ cppi_dump_rxbd("S", d);
+ }
+
+ /* in case the preceding transfer left some state... */
+ tail = rx->last_processed;
+ if (tail) {
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+
+ core_rxirq_enable(tibase, rx->index + 1);
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* REVISIT specs say to write this AFTER the BUFCNT register
+ * below ... but that loses badly.
+ */
+ musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+ /* bufferCount must be at least 3, and zeroes on completion
+ * unless it underflows below zero, or stops at two, or keeps
+ * growing ... grr.
+ */
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+
+ if (!i)
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ else if (n_bds > (i - 3))
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds - (i - 3));
+
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+ if (i < (2 + n_bds)) {
+ DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+ rx->index, i, n_bds);
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ }
+
+ cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ * all short reads as errors and kick in high level fault recovery.
+ * For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ struct musb *musb;
+
+ cppi_ch = container_of(ch, struct cppi_channel, channel);
+ controller = cppi_ch->controller;
+ musb = controller->musb;
+
+ switch (ch->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* fault irq handler should have handled cleanup */
+ WARNING("%cX DMA%d not cleaned up after abort!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_BUSY:
+ WARNING("program active channel? %cX DMA%d\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ DBG(1, "%cX DMA%d not allocated!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* FALLTHROUGH */
+ case MUSB_DMA_STATUS_FREE:
+ break;
+ }
+
+ ch->status = MUSB_DMA_STATUS_BUSY;
+
+ /* set transfer parameters, then queue up its first segment */
+ cppi_ch->buf_dma = dma_addr;
+ cppi_ch->offset = 0;
+ cppi_ch->maxpacket = maxpacket;
+ cppi_ch->buf_len = len;
+
+ /* TX channel? or RX? */
+ if (cppi_ch->transmit)
+ cppi_next_tx_segment(musb, cppi_ch);
+ else
+ cppi_next_rx_segment(musb, cppi_ch, mode);
+
+ return true;
+}
+
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+ struct cppi_channel *rx = &cppi->rx[ch];
+ struct cppi_rx_stateram __iomem *state = rx->state_ram;
+ struct cppi_descriptor *bd;
+ struct cppi_descriptor *last = rx->last_processed;
+ bool completed = false;
+ bool acked = false;
+ int i;
+ dma_addr_t safe2ack;
+ void __iomem *regs = rx->hw_ep->regs;
+
+ cppi_dump_rx(6, rx, "/K");
+
+ bd = last ? last->next : rx->head;
+ if (!bd)
+ return false;
+
+ /* run through all completed BDs */
+ for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+ (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (!completed && (bd->hw_options & CPPI_OWN_SET))
+ break;
+
+ DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)\n",
+ bd->dma, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options,
+ rx->channel.actual_len);
+
+ /* actual packet received length */
+ if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+ len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+ else
+ len = 0;
+
+ if (bd->hw_options & CPPI_EOQ_MASK)
+ completed = true;
+
+ if (!completed && len < bd->buflen) {
+ /* NOTE: when we get a short packet, RXCSR_H_REQPKT
+ * must have been cleared, and no more DMA packets may
+ * active be in the queue... TI docs didn't say, but
+			 * be active in the queue... TI docs didn't say, but
+ */
+ completed = true;
+ DBG(3, "rx short %d/%d (%d)\n",
+ len, bd->buflen,
+ rx->channel.actual_len);
+ }
+
+ /* If we got here, we expect to ack at least one BD; meanwhile
+		 * CPPI may be completing other BDs while we scan this list...
+ *
+ * RACE: we can notice OWN cleared before CPPI raises the
+ * matching irq by writing that BD as the completion pointer.
+ * In such cases, stop scanning and wait for the irq, avoiding
+ * lost acks and states where BD ownership is unclear.
+ */
+ if (bd->dma == safe2ack) {
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ safe2ack = musb_readl(&state->rx_complete, 0);
+ acked = true;
+ if (bd->dma == safe2ack)
+ safe2ack = 0;
+ }
+
+ rx->channel.actual_len += len;
+
+ cppi_bd_free(rx, last);
+ last = bd;
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+ rx->last_processed = last;
+
+ /* dma abort, lost ack, or ... */
+ if (!acked && last) {
+ int csr;
+
+ if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ if (safe2ack == 0) {
+ cppi_bd_free(rx, last);
+ rx->last_processed = NULL;
+
+ /* if we land here on the host side, H_REQPKT will
+ * be clear and we need to restart the queue...
+ */
+ WARN_ON(rx->head);
+ }
+ musb_ep_select(cppi->mregs, rx->index + 1);
+ csr = musb_readw(regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_DMAENAB) {
+ DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ rx->index,
+ rx->head, rx->tail,
+ rx->last_processed
+ ? rx->last_processed->dma
+ : 0,
+ completed ? ", completed" : "",
+ csr);
+ cppi_dump_rxq(4, "/what?", rx);
+ }
+ }
+ if (!completed) {
+ int csr;
+
+ rx->head = bd;
+
+ /* REVISIT seems like "autoreq all but EOP" doesn't...
+		 * setting it here "should" be racy, but seems to work
+ */
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ if (is_host_active(cppi->musb)
+ && bd
+ && !(csr & MUSB_RXCSR_H_REQPKT)) {
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(regs, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | csr);
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ }
+ } else {
+ rx->head = NULL;
+ rx->tail = NULL;
+ }
+
+ cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+ return completed;
+}
+
+void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+{
+ void __iomem *tibase;
+ int i, index;
+ struct cppi *cppi;
+ struct musb_hw_ep *hw_ep = NULL;
+
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+
+ tibase = musb->ctrl_base;
+
+ /* process TX channels */
+ for (index = 0; tx; tx = tx >> 1, index++) {
+ struct cppi_channel *tx_ch;
+ struct cppi_tx_stateram __iomem *tx_ram;
+ bool completed = false;
+ struct cppi_descriptor *bd;
+
+ if (!(tx & 1))
+ continue;
+
+ tx_ch = cppi->tx + index;
+ tx_ram = tx_ch->state_ram;
+
+ /* FIXME need a cppi_tx_scan() routine, which
+ * can also be called from abort code
+ */
+
+ cppi_dump_tx(5, tx_ch, "/E");
+
+ bd = tx_ch->head;
+
+ if (NULL == bd) {
+ DBG(1, "null BD\n");
+ continue;
+ }
+
+ /* run through all completed BDs */
+ for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (bd->hw_options & CPPI_OWN_SET)
+ break;
+
+ DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+ tx_ch->channel.actual_len += len;
+
+ tx_ch->last_processed = bd;
+
+ /* write completion register to acknowledge
+ * processing of completed BDs, and possibly
+ * release the IRQ; EOQ might not be set ...
+ *
+ * REVISIT use the same ack strategy as rx
+ *
+ * REVISIT have observed bit 18 set; huh??
+ */
+ /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+ musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+
+ /* on end of segment, maybe go to next one */
+ if (completed) {
+ /* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+ /* transfer more, or report completion */
+ if (tx_ch->offset >= tx_ch->buf_len) {
+ tx_ch->head = NULL;
+ tx_ch->tail = NULL;
+ tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = tx_ch->hw_ep;
+
+ /* Peripheral role never repurposes the
+ * endpoint, so immediate completion is
+ * safe. Host role waits for the fifo
+ * to empty (TXPKTRDY irq) before going
+ * to the next queued bulk transfer.
+ */
+ if (is_host_active(cppi->musb)) {
+#if 0
+ /* WORKAROUND because we may
+					 * not always get TXPKTRDY ...
+ */
+ int csr;
+
+ csr = musb_readw(hw_ep->regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+#endif
+ completed = false;
+ }
+ if (completed)
+ musb_dma_completion(musb, index + 1, 1);
+
+ } else {
+ /* Bigger transfer than we could fit in
+ * that first batch of descriptors...
+ */
+ cppi_next_tx_segment(musb, tx_ch);
+ }
+ } else
+ tx_ch->head = bd;
+ }
+
+ /* Start processing the RX block */
+ for (index = 0; rx; rx = rx >> 1, index++) {
+
+ if (rx & 1) {
+ struct cppi_channel *rx_ch;
+
+ rx_ch = cppi->rx + index;
+
+ /* let incomplete dma segments finish */
+ if (!cppi_rx_scan(cppi, index))
+ continue;
+
+ /* start another dma segment if needed */
+ if (rx_ch->channel.actual_len != rx_ch->buf_len
+ && rx_ch->channel.actual_len
+ == rx_ch->offset) {
+ cppi_next_rx_segment(musb, rx_ch, 1);
+ continue;
+ }
+
+ /* all segments completed! */
+ rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = rx_ch->hw_ep;
+
+ core_rxirq_disable(tibase, index + 1);
+ musb_dma_completion(musb, index + 1, 0);
+ }
+ }
+
+ /* write to CPPI EOI register to re-enable interrupts */
+ musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+ struct cppi *controller;
+
+ controller = kzalloc(sizeof *controller, GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->mregs = mregs;
+ controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+ controller->musb = musb;
+ controller->controller.start = cppi_controller_start;
+ controller->controller.stop = cppi_controller_stop;
+ controller->controller.channel_alloc = cppi_channel_allocate;
+ controller->controller.channel_release = cppi_channel_release;
+ controller->controller.channel_program = cppi_channel_program;
+ controller->controller.channel_abort = cppi_channel_abort;
+
+ /* NOTE: allocating from on-chip SRAM would give the least
+ * contention for memory access, if that ever matters here.
+ */
+
+ /* setup BufferPool */
+ controller->pool = dma_pool_create("cppi",
+ controller->musb->controller,
+ sizeof(struct cppi_descriptor),
+ CPPI_DESCRIPTOR_ALIGN, 0);
+ if (!controller->pool) {
+ kfree(controller);
+ return NULL;
+ }
+
+ return &controller->controller;
+}
+
+/*
+ * Destroy a previously-instantiated DMA controller.
+ */
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi *cppi;
+
+ cppi = container_of(c, struct cppi, controller);
+
+ /* assert: caller stopped the controller first */
+ dma_pool_destroy(cppi->pool);
+
+ kfree(cppi);
+}
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ void __iomem *mbase;
+ void __iomem *tibase;
+ void __iomem *regs;
+ u32 value;
+ struct cppi_descriptor *queue;
+
+ cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+ controller = cppi_ch->controller;
+
+ switch (channel->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* from RX or TX fault irq handler */
+ case MUSB_DMA_STATUS_BUSY:
+ /* the hardware needs shutting down */
+ regs = cppi_ch->hw_ep->regs;
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ case MUSB_DMA_STATUS_FREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (!cppi_ch->transmit && cppi_ch->head)
+ cppi_dump_rxq(3, "/abort", cppi_ch);
+
+ mbase = controller->mregs;
+ tibase = controller->tibase;
+
+ queue = cppi_ch->head;
+ cppi_ch->head = NULL;
+ cppi_ch->tail = NULL;
+
+ /* REVISIT should rely on caller having done this,
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+ musb_ep_select(mbase, cppi_ch->index + 1);
+
+ if (cppi_ch->transmit) {
+ struct cppi_tx_stateram __iomem *tx_ram;
+ int enabled;
+
+ /* mask interrupts raised to signal teardown complete. */
+ enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
+ & (1 << cppi_ch->index);
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ (1 << cppi_ch->index));
+
+ /* REVISIT put timeouts on these controller handshakes */
+
+ cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+ /* teardown DMA engine then usb core */
+ do {
+ value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+ } while (!(value & CPPI_TEAR_READY));
+ musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+ tx_ram = cppi_ch->state_ram;
+ do {
+ value = musb_readl(&tx_ram->tx_complete, 0);
+ } while (0xFFFFFFFC != value);
+ musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
+
+ /* FIXME clean up the transfer state ... here?
+ * the completion routine should get called with
+ * an appropriate status code.
+ */
+
+ value = musb_readw(regs, MUSB_TXCSR);
+ value &= ~MUSB_TXCSR_DMAENAB;
+ value |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(regs, MUSB_TXCSR, value);
+ musb_writew(regs, MUSB_TXCSR, value);
+
+ /* re-enable interrupt */
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ (1 << cppi_ch->index));
+
+ /* While we scrub the TX state RAM, ensure that we clean
+ * up any interrupt that's currently asserted:
+		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
+		 *	(write back mode)
+		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
+		 *	(compare mode)
+		 * Value written is compared (for bits 31:2) and when
+ * equal, interrupt is deasserted.
+ */
+ cppi_reset_tx(tx_ram, 1);
+ musb_writel(&tx_ram->tx_complete, 0, 0);
+
+ cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+ /* REVISIT tx side _should_ clean up the same way
+ * as the RX side ... this does no cleanup at all!
+ */
+
+ } else /* RX */ {
+ u16 csr;
+
+ /* NOTE: docs don't guarantee any of this works ... we
+ * expect that if the usb core stops telling the cppi core
+ * to pull more data from it, then it'll be safe to flush
+ * current RX DMA state iff any pending fifo transfer is done.
+ */
+
+ core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+ /* for host, ensure ReqPkt is never set again */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ value &= ~((0x3) << (cppi_ch->index * 2));
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+ }
+
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* for host, clear (just) ReqPkt at end of current packet(s) */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ csr &= ~MUSB_RXCSR_H_REQPKT;
+ } else
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+
+ /* clear dma enable */
+ csr &= ~(MUSB_RXCSR_DMAENAB);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* Quiesce: wait for current dma to finish (if not cleanup).
+ * We can't use bit zero of stateram->rx_sop, since that
+ * refers to an entire "DMA packet" not just emptying the
+ * current fifo. Most segments need multiple usb packets.
+ */
+ if (channel->status == MUSB_DMA_STATUS_BUSY)
+ udelay(50);
+
+ /* scan the current list, reporting any data that was
+ * transferred and acking any IRQ
+ */
+ cppi_rx_scan(controller, cppi_ch->index);
+
+ /* clobber the existing state once it's idle
+ *
+ * NOTE: arguably, we should also wait for all the other
+ * RX channels to quiesce (how??) and then temporarily
+ * disable RXCPPI_CTRL_REG ... but it seems that we can
+ * rely on the controller restarting from state ram, with
+ * only RXCPPI_BUFCNT state being bogus. BUFCNT will
+ * correct itself after the next DMA transfer though.
+ *
+ * REVISIT does using rndis mode change that?
+ */
+ cppi_reset_rx(cppi_ch->state_ram);
+
+ /* next DMA request _should_ load cppi head ptr */
+
+ /* ... we don't "free" that list, only mutate it in place. */
+ cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+ /* clean up previously pending bds */
+ cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+ cppi_ch->last_processed = NULL;
+
+ while (queue) {
+ struct cppi_descriptor *tmp = queue->next;
+
+ cppi_bd_free(cppi_ch, queue);
+ queue = tmp;
+ }
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->buf_dma = 0;
+ cppi_ch->offset = 0;
+ cppi_ch->buf_len = 0;
+ cppi_ch->maxpacket = 0;
+ return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 0000000..fc5216b
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "musb_dma.h"
+#include "musb_core.h"
+
+
+/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
+ * would seem to be shared with the TUSB6020 (over VLYNQ).
+ */
+
+#include "davinci.h"
+
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+ u32 tx_head; /* "DMA packet" head descriptor */
+ u32 tx_buf;
+ u32 tx_current; /* current descriptor */
+ u32 tx_buf_current;
+ u32 tx_info; /* flags, remaining buflen */
+ u32 tx_rem_len;
+ u32 tx_dummy; /* unused */
+ u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+ u32 rx_skipbytes;
+ u32 rx_head;
+ u32 rx_sop; /* "DMA packet" head descriptor */
+ u32 rx_current; /* current descriptor */
+ u32 rx_buf_current;
+ u32 rx_len_len;
+ u32 rx_cnt_cnt;
+ u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET ((u32)(1 << 31))
+#define CPPI_EOP_SET ((u32)(1 << 30))
+#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
+#define CPPI_EOQ_MASK ((u32)(1 << 28))
+#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
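+
+/* Illustrative encoding: a full 512-byte single-packet TX descriptor
+ * carries CPPI_SOP_SET | CPPI_EOP_SET | CPPI_OWN_SET | 512 in hw_options.
+ */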
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+ /* hardware overlay */
+ u32 hw_next; /* next buffer descriptor Pointer */
+ u32 hw_bufp; /* i/o buffer pointer */
+ u32 hw_off_len; /* buffer_offset16, buffer_length16 */
+ u32 hw_options; /* flags: SOP, EOP etc */
+
+ struct cppi_descriptor *next;
+ dma_addr_t dma; /* address of this descriptor */
+ u32 buflen; /* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
+
+
+struct cppi;
+
+/* CPPI Channel Control structure */
+struct cppi_channel {
+ struct dma_channel channel;
+
+ /* back pointer to the DMA controller structure */
+ struct cppi *controller;
+
+ /* which direction of which endpoint? */
+ struct musb_hw_ep *hw_ep;
+ bool transmit;
+ u8 index;
+
+ /* DMA modes: RNDIS or "transparent" */
+ u8 is_rndis;
+
+ /* bookkeeping for current transfer request */
+ dma_addr_t buf_dma;
+ u32 buf_len;
+ u32 maxpacket;
+ u32 offset; /* dma requested */
+
+ void __iomem *state_ram; /* CPPI state */
+
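+ /* unused buffer descriptors for this channel */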
+ struct cppi_descriptor *freelist;
+
+ /* BD management fields */
+ struct cppi_descriptor *head;
+ struct cppi_descriptor *tail;
+ struct cppi_descriptor *last_processed;
+
+ /* use tx_complete in host role to track endpoints waiting for
+ * FIFONOTEMPTY to clear.
+ */
+ struct list_head tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *mregs; /* Mentor regs */
+ void __iomem *tibase; /* TI/CPPI regs */
+
+ struct cppi_channel tx[MUSB_C_NUM_EPT - 1];
+ struct cppi_channel rx[MUSB_C_NUM_EPR - 1];
+
+ struct dma_pool *pool;
+
+ struct list_head tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 0000000..75baf18
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+ /* start the on-chip PHY and its PLL */
+ __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+ (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
+ while ((__raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR))
+ & USBPHY_PHYCLKGD) == 0)
+ cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+ /* powerdown the on-chip PHY and its oscillator */
+ __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
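+/* nonzero means DMA is not active; cleared when the controller is enabled */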
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+ u32 tmp, old, val;
+
+ /* workaround: set up irqs through both register sets */
+ tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+ << DAVINCI_USB_TXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ old = tmp;
+ tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+ << DAVINCI_USB_RXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ tmp |= old;
+
+ val = ~MUSB_INTR_SOF;
+ tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+ if (is_dma_capable() && !dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 0;
+
+ /* force a DRVVBUS irq so we can start polling for ID change */
+ if (is_otg_enabled(musb))
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ /* because we don't set CTRLR.UINT, it's important to:
+ * - not read/write INTRUSB/INTRUSBE
+ * - (except during initial setup, as workaround)
+ * - use INTSETR/INTCLRR instead
+ */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+ DAVINCI_USB_USBINT_MASK
+ | DAVINCI_USB_TXINT_MASK
+ | DAVINCI_USB_RXINT_MASK);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+ if (is_dma_capable() && !dma_off)
+ WARNING("dma still active\n");
+}
+
+
+/* REVISIT it's not clear whether DaVinci can support full OTG. */
+
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define portstate(stmt) stmt
+#else
+#define portstate(stmt)
+#endif
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
+
+/* I2C operations are always synchronous, and require a task context.
+ * With unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *ignored)
+{
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+ vbus_state = !vbus_state;
+}
+static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
+#endif /* modified board */
+#endif /* EVM */
+
+static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+{
+ if (is_on)
+ is_on = 1;
+
+ if (vbus_state == is_on)
+ return;
+ vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+ if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+ /* modified EVM board switching VBUS with GPIO(6) not I2C
+ * NOTE: PINMUX0.RGB888 (bit23) must be clear
+ */
+ if (is_on)
+ gpio_set(GPIO(6));
+ else
+ gpio_clear(GPIO(6));
+ immediate = 1;
+#else
+ if (immediate)
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+ else
+ schedule_work(&evm_vbus_work);
+#endif
+ }
+#endif
+ if (immediate)
+ vbus_state = is_on;
+}
+
+static void davinci_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+ davinci_source_power(musb, is_on, 0);
+}
+
+
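+/* polling interval for the OTG workaround timer below */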
+#define POLL_SECONDS 2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /* We poll because DaVinci won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VFALL:
+ /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+ * seems to mis-handle session "start" otherwise (or in our
+ * case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ if (!is_peripheral_enabled(musb))
+ break;
+
+ /* There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.SESSION flag).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE setting the session flag is _supposed_ to trigger
+ * SRP, but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL,
+ devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+ void __iomem *tibase = musb->ctrl_base;
+ u32 tmp;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup); use the TI ones and EOI.
+ *
+ * Docs describe irq "vector" registers associated with the CPPI and
+ * USB EOI registers. These hold a bitmask corresponding to the
+ * current IRQ, not an irq handler address. Would using those bits
+ * resolve some of the races observed in this dispatch code??
+ */
+
+ /* CPPI interrupts share the same IRQ line, but have their own
+ * mask, state, "vector", and EOI registers.
+ */
+ if (is_cppi_enabled()) {
+ u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (cppi_tx || cppi_rx) {
+ DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+ cppi_completion(musb, cppi_rx, cppi_tx);
+ retval = IRQ_HANDLED;
+ }
+ }
+
+ /* ack and handle non-CPPI interrupts */
+ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+ musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+ DBG(4, "IRQ %08x\n", tmp);
+
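+ /* split the combined status into the fields musb_interrupt() expects */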
+ musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+ >> DAVINCI_USB_RXINT_SHIFT;
+ musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+ >> DAVINCI_USB_TXINT_SHIFT;
+ musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+ >> DAVINCI_USB_USBINT_SHIFT;
+
+ /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+ * DaVinci's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires we know its
+ * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+ int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err = musb->int_usb & MUSB_INTR_VBUSERROR;
+
+ err = is_host_enabled(musb)
+ && (musb->int_usb & MUSB_INTR_VBUSERROR);
+ if (err) {
+ /* The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"starting sessions
+ * without waiting (on EVM, a **long** time) for VBUS
+ * to stop registering in devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (is_host_enabled(musb) && drvvbus) {
+ musb->is_active = 1;
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&otg_workaround);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete poweron within 100 msec */
+ davinci_source_power(musb, drvvbus, 0);
+ DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ otg_state_string(musb),
+ err ? " ERROR" : "",
+ devctl);
+ retval = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ retval |= musb_interrupt(musb);
+
+ /* irq stays asserted until EOI is written */
+ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+ /* poll for ID change */
+ if (is_otg_enabled(musb)
+ && musb->xceiv.state == OTG_STATE_B_IDLE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get unhandled IRQs
+ * (e.g. ep0); not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "unhandled? %08x\n", tmp);
+ return IRQ_HANDLED;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+
+ musb->mregs += DAVINCI_BASE_OFFSET;
+#if 0
+ /* REVISIT there's something odd about clocking, this
+ * didn't appear to do the job ...
+ */
+ musb->clock = clk_get(pDevice, "usb");
+ if (IS_ERR(musb->clock))
+ return PTR_ERR(musb->clock);
+
+ status = clk_enable(musb->clock);
+ if (status < 0)
+ return -ENODEV;
+#endif
+
+ /* returns zero if e.g. not clocked */
+ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+ if (revision == 0)
+ return -ENODEV;
+
+ if (is_host_enabled(musb))
+ setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+ musb->board_set_vbus = davinci_set_vbus;
+ davinci_source_power(musb, 0, 1);
+
+ /* reset the controller */
+ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+ /* start the on-chip PHY and its PLL */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
+ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+ revision, __raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR)),
+ musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+ musb->isr = davinci_interrupt;
+ return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ if (is_host_enabled(musb))
+ del_timer_sync(&otg_workaround);
+
+ davinci_source_power(musb, 0 /*off*/, 1);
+
+ /* delay, to avoid problems with module reload */
+ if (is_host_enabled(musb) && musb->xceiv.default_a) {
+ int maxdelay = 30;
+ u8 devctl, warn = 0;
+
+ /* if there's no peripheral connected, this can take a
+ * long time to fall, especially on EVM with huge C133.
+ */
+ do {
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (!(devctl & MUSB_DEVCTL_VBUS))
+ break;
+ if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+ warn = devctl & MUSB_DEVCTL_VBUS;
+ DBG(1, "VBUS %d\n",
+ warn >> MUSB_DEVCTL_VBUS_SHIFT);
+ }
+ msleep(1000);
+ maxdelay--;
+ } while (maxdelay > 0);
+
+ /* in OTG mode, another host might be connected */
+ if (devctl & MUSB_DEVCTL_VBUS)
+ DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+ }
+
+ phy_off();
+ return 0;
+}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 0000000..7fb6238
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_PHYCLKGD (1 << 8)
+#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
+#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
+#define USBPHY_CLKO1SEL (1 << 3)
+#define USBPHY_OSCPDWN (1 << 2)
+#define USBPHY_PHYPDWN (1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG 0x00
+#define DAVINCI_USB_CTRL_REG 0x04
+#define DAVINCI_USB_STAT_REG 0x08
+#define DAVINCI_RNDIS_REG 0x10
+#define DAVINCI_AUTOREQ_REG 0x14
+#define DAVINCI_USB_INT_SOURCE_REG 0x20
+#define DAVINCI_USB_INT_SET_REG 0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
+#define DAVINCI_USB_INT_MASK_REG 0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG 0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG 0x3c
+#define DAVINCI_USB_EOI_INTVEC 0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG 0x80
+#define DAVINCI_TXCPPI_TEAR_REG 0x84
+#define DAVINCI_CPPI_EOI_REG 0x88
+#define DAVINCI_CPPI_INTVEC_REG 0x8c
+#define DAVINCI_TXCPPI_MASKED_REG 0x90
+#define DAVINCI_TXCPPI_RAW_REG 0x94
+#define DAVINCI_TXCPPI_INTENAB_REG 0x98
+#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG 0xC0
+#define DAVINCI_RXCPPI_MASKED_REG 0xD0
+#define DAVINCI_RXCPPI_RAW_REG 0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE 1
+#define DAVINCI_DMA_CTRL_DISABLE 0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT 16
+#define DAVINCI_USB_TXINT_SHIFT 0
+#define DAVINCI_USB_RXINT_SHIFT 8
+
+#define DAVINCI_INTR_DRVVBUS 0x0100
+
+#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+ (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+ (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
+
+#define DAVINCI_BASE_OFFSET 0x400
+
+#endif /* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 0000000..c5b8f02
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2253 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works. These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments. Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE: the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version. This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ * - Lack of host-side transaction scheduling, for all transfer types.
+ * The hardware doesn't do it; instead, software must.
+ *
+ * This is not an issue for OTG devices that don't support external
+ * hubs, but for more "normal" USB hosts it's a user issue that the
+ * "multipoint" support doesn't scale in the expected ways. That
+ * includes DaVinci EVM in a common non-OTG mode.
+ *
+ * * Control and bulk use dedicated endpoints, and there's as
+ * yet no mechanism to either (a) reclaim the hardware when
+ * peripherals are NAKing, which gets complicated with bulk
+ * endpoints, or (b) use more than a single bulk endpoint in
+ * each direction.
+ *
+ * RESULT: one device may be perceived as blocking another one.
+ *
+ * * Interrupt and isochronous will dynamically allocate endpoint
+ * hardware, but (a) there's no record keeping for bandwidth;
+ * (b) in the common case that few endpoints are available, there
+ * is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ * RESULT: At one extreme, bandwidth can be overcommitted in
+ * some hardware configurations, and no faults will be reported.
+ * At the other extreme, the bandwidth capabilities which do
+ * exist tend to be severely undercommitted. You can't yet hook
+ * up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ * - Kconfig for everything user-configurable
+ * - <asm/arch/hdrc_cnf.h> for SOC or family details
+ * - platform_device for addressing, irq, and platform_data
+ * - platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musb_core.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+unsigned debug;
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION "6.0"
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb_hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* usbcore insists dev->driver_data is a "struct hcd *" */
+ return hcd_to_musb(dev_get_drvdata(dev));
+#else
+ return dev_get_drvdata(dev);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ prefetch((u8 *)src);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', hw_ep->epnum, fifo, len, src);
+
+ /* we can't assume unaligned reads work */
+ if (likely((0x01 & (unsigned long) src) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned source address */
+ if ((0x02 & (unsigned long) src) == 0) {
+ if (len >= 4) {
+ writesl(fifo, src + index, len >> 2);
+ index += len & ~0x03;
+ }
+ if (len & 0x02) {
+ musb_writew(fifo, 0, *(u16 *)&src[index]);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ writesw(fifo, src + index, len >> 1);
+ index += len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ musb_writeb(fifo, 0, src[index]);
+ } else {
+ /* byte aligned */
+ writesb(fifo, src, len);
+ }
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
+ /* we can't assume unaligned writes work */
+ if (likely((0x01 & (unsigned long) dst) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) dst) == 0) {
+ if (len >= 4) {
+ readsl(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ *(u16 *)&dst[index] = musb_readw(fifo, 0);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ readsw(fifo, dst, len >> 1);
+ index = len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ dst[index] = musb_readb(fifo, 0);
+ } else {
+ /* byte aligned */
+ readsb(fifo, dst, len);
+ }
+}
+
+#endif /* normal PIO */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+ /* implicit CRC16 then EOP to end */
+};
+
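+/* write the test packet into ep0's fifo and set TXPKTRDY to send it */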
+void musb_load_testpacket(struct musb *musb)
+{
+ void __iomem *regs = musb->endpoints[0].regs;
+
+ musb_ep_select(musb->mregs, 0);
+ musb_write_fifo(musb->control_ep,
+ sizeof(musb_test_packet), musb_test_packet);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+const char *otg_state_string(struct musb *musb)
+{
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE: return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
+ case OTG_STATE_A_HOST: return "a_host";
+ case OTG_STATE_A_SUSPEND: return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
+ case OTG_STATE_B_IDLE: return "b_idle";
+ case OTG_STATE_B_SRP_INIT: return "b_srp_init";
+ case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
+ case OTG_STATE_B_HOST: return "b_host";
+ default: return "UNDEFINED";
+ }
+}
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+/*
+ * See also USB_OTG_1-3.pdf 6.6.5 Timers
+ * REVISIT: Are the other timers done in the hardware?
+ */
+#define TB_ASE0_BRST 100 /* Min 3.125 ms */
+
+/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+void musb_otg_timer_func(unsigned long data)
+{
+ struct musb *musb = (struct musb *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
+ musb_hnp_stop(musb);
+ break;
+ default:
+ DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+ }
+ musb->ignore_disconnect = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
+
+/*
+ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+ void __iomem *mbase = musb->mregs;
+ u8 reg;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_PERIPHERAL:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: Switching back to A-host\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_B_HOST:
+ DBG(1, "HNP: Disabling HR\n");
+ hcd->self.is_b_host = 0;
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ reg = musb_readb(mbase, MUSB_POWER);
+ reg |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ /* REVISIT: Start SESSION_REQUEST here? */
+ break;
+ default:
+ DBG(1, "HNP: Stopping in unknown state %s\n",
+ otg_state_string(musb));
+ }
+
+ /*
+ * When returning to A state after HNP, avoid hub_port_rebounce(),
+ * which causes occasional OPT A "Did not receive reset after connect"
+ * errors.
+ */
+ musb->port1_status &=
+ ~(1 << USB_PORT_FEAT_C_CONNECTION);
+}
+
+#endif
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+
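+/* the global interrupt events dispatched to musb_stage0_irq() */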
+#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
+ | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
+ | MUSB_INTR_RESET)
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+ void __iomem *mbase = musb->mregs;
+
+ DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+ int_usb);
+
+ /* in host mode, the peripheral may issue remote wakeup.
+ * in peripheral mode, the host may resume the link.
+ * spurious RESUME irqs happen too, paired with SUSPEND.
+ */
+ if (int_usb & MUSB_INTR_RESUME) {
+ handled = IRQ_HANDLED;
+ DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+
+ if (devctl & MUSB_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_SUSPEND:
+ /* remote wakeup? later, GetPortStatus
+ * will stop RESUME signaling
+ */
+
+ if (power & MUSB_POWER_SUSPENDM) {
+ /* spurious */
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ DBG(2, "Spurious SUSPENDM\n");
+ break;
+ }
+
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESUME);
+
+ musb->port1_status |=
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
+
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 1;
+ MUSB_DEV_MODE(musb);
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "host",
+ otg_state_string(musb));
+ }
+#endif
+ } else {
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* possibly DISCONNECT is upcoming */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ /* disconnect while suspended? we may
+ * not get a disconnect irq...
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+ ) {
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ }
+ musb_g_resume(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+#endif
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "peripheral",
+ otg_state_string(musb));
+ }
+ }
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* see manual for the order of the tests */
+ if (int_usb & MUSB_INTR_SESSREQ) {
+ DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+
+ /* IRQ arrives from ID pin sense or (later, if VBUS power
+ * is removed) SRP. responses are time critical:
+ * - turn on VBUS (with silicon-specific mechanism)
+ * - go through A_WAIT_VRISE
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb->ep0_stage = MUSB_EP0_START;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb_set_vbus(musb, 1);
+
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_VBUSERROR) {
+ int ignore = 0;
+
+ * During connection as an A-Device, we may see short
+ * current spikes causing voltage drops, because of cable
+ * and peripheral capacitance combined with vbus draw.
+ * (So: less common with truly self-powered devices, where
+ * vbus doesn't act like a power supply.)
+ *
+ * Such spikes are short; usually less than ~500 usec, max
+ * of ~2 msec. That is, they're not sustained overcurrent
+ * errors, though they're reported using VBUSERROR irqs.
+ *
+ * Workarounds: (a) hardware: use self powered devices.
+ * (b) software: ignore non-repeated VBUS errors.
+ *
+ * REVISIT: do delays from lots of DEBUG_KERNEL checks
+ * make trouble here, keeping VBUS < 4.4V ?
+ */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ /* recovery is dicey once we've gotten past the
+ * initial stages of enumeration, but if VBUS
+ * stayed ok at the other end of the link, and
+ * another reset is due (at least for high speed,
+ * to redo the chirp etc), it might work OK...
+ */
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_WAIT_VRISE:
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ ignore = 1;
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mbase, MUSB_DEVCTL, devctl);
+ } else {
+ musb->port1_status |=
+ (1 << USB_PORT_FEAT_OVER_CURRENT)
+ | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ break;
+ default:
+ break;
+ }
+
+ DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+ otg_state_string(musb),
+ devctl,
+ ({ char *s;
+ switch (devctl & MUSB_DEVCTL_VBUS) {
+ case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<SessEnd"; break;
+ case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<AValid"; break;
+ case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<VBusValid"; break;
+ /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+ default:
+ s = "VALID"; break;
+ }; s; }),
+ VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+ musb->port1_status);
+
+ /* go through A_WAIT_VFALL then start a new session */
+ if (!ignore)
+ musb_set_vbus(musb, 0);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_CONNECT) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ handled = IRQ_HANDLED;
+ musb->is_active = 1;
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* flush endpoints when transitioning from Device Mode */
+ if (is_peripheral_active(musb)) {
+ /* REVISIT HNP; just force disconnect */
+ }
+ musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+ musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+ musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+ );
+ musb->port1_status |= USB_PORT_STAT_CONNECTION
+ |(USB_PORT_STAT_C_CONNECTION << 16);
+
+ /* high vs full speed is just a guess until after reset */
+ if (devctl & MUSB_DEVCTL_LSDEV)
+ musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+ if (hcd->status_urb)
+ usb_hcd_poll_rh_status(hcd);
+ else
+ usb_hcd_resume_root_hub(hcd);
+
+ MUSB_HST_MODE(musb);
+
+ /* indicate new connection to OTG machine */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ int_usb &= ~MUSB_INTR_SUSPEND;
+ } else
+ DBG(1, "CONNECT as b_peripheral???\n");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: Waiting to switch to b_host state\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ break;
+ default:
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ hcd->self.is_b_host = 0;
+ }
+ break;
+ }
+ DBG(1, "CONNECT (%s) devctl %02x\n",
+ otg_state_string(musb), devctl);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* mentor saves a bit: bus reset and babble share the same irq.
+ * only host sees babble; only peripheral sees bus reset.
+ */
+ if (int_usb & MUSB_INTR_RESET) {
+ if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+ /*
+ * Looks like non-HS BABBLE can be ignored, but
+ * HS BABBLE is an error condition. For HS the solution
+ * is to avoid babble in the first place and fix what
+ * caused BABBLE. When HS BABBLE happens we can only
+ * stop the session.
+ */
+ if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+ DBG(1, "BABBLE devctl: %02x\n", devctl);
+ else {
+ ERR("Stopping host session -- babble\n");
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+ }
+ } else if (is_peripheral_capable()) {
+ DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+ case OTG_STATE_A_SUSPEND:
+ * We need to ignore disconnect on suspend;
+ * otherwise tusb 2.0 won't reconnect after a
+ * power cycle, which breaks otg compliance.
+ */
+ musb->ignore_disconnect = 1;
+ musb_g_reset(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+ DBG(1, "HNP: Setting timer as %s\n",
+ otg_state_string(musb));
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(100));
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb_g_reset(musb);
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_reset(musb);
+ break;
+ default:
+ DBG(1, "Unhandled BUS RESET as %s\n",
+ otg_state_string(musb));
+ }
+ }
+
+ handled = IRQ_HANDLED;
+ }
+ schedule_work(&musb->irq_work);
+
+ return handled;
+}
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (int_usb & MUSB_INTR_SOF) {
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *ep;
+ u8 epnum;
+ u16 frame;
+
+ DBG(6, "START_OF_FRAME\n");
+ handled = IRQ_HANDLED;
+
+ /* start any periodic Tx transfers waiting for current frame */
+ frame = musb_readw(mbase, MUSB_FRAME);
+ ep = musb->endpoints;
+ for (epnum = 1; (epnum < musb->nr_endpoints)
+ && (musb->epmask >= (1 << epnum));
+ epnum++, ep++) {
+ /*
+ * FIXME handle framecounter wraps (12 bits)
+ * eliminate duplicated StartUrb logic
+ */
+ if (ep->dwWaitFrame >= frame) {
+ ep->dwWaitFrame = 0;
+ pr_debug("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ epnum);
+ if (!ep->tx_channel)
+ musb_h_tx_start(musb, epnum);
+ else
+ cppi_hostdma_start(musb, epnum);
+ }
+ } /* end of for loop */
+ }
+#endif
+
+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+ otg_state_string(musb),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+#endif /* GADGET */
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ otg_state_string(musb));
+ break;
+ }
+
+ schedule_work(&musb->irq_work);
+ }
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /*
+ * We cannot stop HNP here; devctl BDEVICE might
+ * still be set.
+ */
+ break;
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(TB_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ schedule_work(&musb->irq_work);
+ }
+
+
+ return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+ void __iomem *regs = musb->mregs;
+ u8 devctl = musb_readb(regs, MUSB_DEVCTL);
+
+ DBG(2, "<== devctl %02x\n", devctl);
+
+ /* Set INT enable registers, enable interrupts */
+ musb_writew(regs, MUSB_INTRTXE, musb->epmask);
+ musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+ musb_writeb(regs, MUSB_TESTMODE, 0);
+
+ /* put into basic highspeed mode and start session */
+ musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+ | MUSB_POWER_SOFTCONN
+ | MUSB_POWER_HSENAB
+ /* ENSUSPEND wedges tusb */
+ /* | MUSB_POWER_ENSUSPEND */
+ );
+
+ musb->is_active = 0;
+ devctl = musb_readb(regs, MUSB_DEVCTL);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ if (is_otg_enabled(musb)) {
+ /* session started after:
+ * (a) ID-grounded irq, host mode;
+ * (b) vbus present/connect IRQ, peripheral mode;
+ * (c) peripheral initiates, using SRP
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ else
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else if (is_host_enabled(musb)) {
+ /* assume ID pin is hard-wired to ground */
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else /* peripheral is enabled */ {
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ }
+ musb_platform_enable(musb);
+ musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+
+static void musb_generic_disable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u16 temp;
+
+ /* disable interrupts */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0);
+ musb_writew(mbase, MUSB_INTRTXE, 0);
+ musb_writew(mbase, MUSB_INTRRXE, 0);
+
+ /* off */
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+
+ /* flush pending interrupts */
+ temp = musb_readb(mbase, MUSB_INTRUSB);
+ temp = musb_readw(mbase, MUSB_INTRTX);
+ temp = musb_readw(mbase, MUSB_INTRRX);
+
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+ /* stop IRQs, timers, ... */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ DBG(3, "HDRC disabled\n");
+
+ /* FIXME
+ * - mark host and/or peripheral drivers unusable/inactive
+ * - disable DMA (and enable it in HdrcStart)
+ * - make sure we can musb_start() after musb_stop(); with
+ * OTG mode, gadget driver module rmmod/modprobe cycles that
+ * - ...
+ */
+ musb_platform_try_idle(musb, 0);
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ if (musb->clock) {
+ clk_put(musb->clock);
+ musb->clock = NULL;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing. The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested. We use normal
+ * idioms so that both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static ushort __initdata fifo_mode = 4;
+#else
+static ushort __initdata fifo_mode = 2;
+#endif
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+ u8 hw_ep_num;
+ enum fifo_style style;
+ enum buf_mode mode;
+ u16 maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct fifo_cfg __initdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct fifo_cfg __initdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct fifo_cfg __initdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct fifo_cfg __initdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct fifo_cfg __initdata mode_4_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+static int __init
+fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct fifo_cfg *cfg, u16 offset)
+{
+ void __iomem *mbase = musb->mregs;
+ int size = 0;
+ u16 maxpacket = cfg->maxpacket;
+ u16 c_off = offset >> 3;
+ u8 c_size;
+
+ /* expect hw_ep has already been zero-initialized */
+
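+ /* fifo sizes are powers of two; the FIFOSZ register holds log2(size) - 3 */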
+ size = ffs(max(maxpacket, (u16) 8)) - 1;
+ maxpacket = 1 << size;
+
+ c_size = size - 3;
+ if (cfg->mode == BUF_DOUBLE) {
+ if ((offset + (maxpacket << 1)) >
+ (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ c_size |= MUSB_FIFOSZ_DPB;
+ } else {
+ if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ }
+
+ /* configure the FIFO */
+ musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirectional halves.
+ */
+ if (hw_ep->epnum == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+#endif
+ switch (cfg->style) {
+ case FIFO_TX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_tx = maxpacket;
+ break;
+ case FIFO_RX:
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+ break;
+ case FIFO_RXTX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+ hw_ep->max_packet_sz_tx = maxpacket;
+
+ hw_ep->is_shared_fifo = true;
+ break;
+ }
+
+ /* NOTE rx and tx endpoint irqs aren't managed separately,
+ * which happens to be ok
+ */
+ musb->epmask |= (1 << hw_ep->epnum);
+
+ return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static struct fifo_cfg __initdata ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __init ep_config_from_table(struct musb *musb)
+{
+ const struct fifo_cfg *cfg;
+ unsigned i, n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->endpoints;
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ /* FALLTHROUGH */
+ case 0:
+ cfg = mode_0_cfg;
+ n = ARRAY_SIZE(mode_0_cfg);
+ break;
+ case 1:
+ cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ case 4:
+ cfg = mode_4_cfg;
+ n = ARRAY_SIZE(mode_4_cfg);
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+ musb_driver_name, fifo_mode);
+
+
+ offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+ /* assert(offset > 0) */
+
+ /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
+ * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
+ */
+
+ for (i = 0; i < n; i++) {
+ u8 epn = cfg->hw_ep_num;
+
+ if (epn >= musb->config->num_eps) {
+ pr_debug("%s: invalid ep %d\n",
+ musb_driver_name, epn);
+ continue;
+ }
+ offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+ if (offset < 0) {
+ pr_debug("%s: mem overrun, ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ epn++;
+ musb->nr_endpoints = max(epn, musb->nr_endpoints);
+ }
+
+ printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
+ musb_driver_name,
+ n + 1, musb->config->num_eps * 2 - 1,
+ offset, (1 << (musb->config->ram_bits + 2)));
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+static int __init ep_config_from_hw(struct musb *musb)
+{
+ u8 epnum = 0, reg;
+ struct musb_hw_ep *hw_ep;
+ void *mbase = musb->mregs;
+
+ DBG(2, "<== static silicon ep config\n");
+
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+ musb_ep_select(mbase, epnum);
+ hw_ep = musb->endpoints + epnum;
+
+ /* read from core using indexed model */
+ reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ if (!reg) {
+ /* 0's returned when no more endpoints */
+ break;
+ }
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
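+ /* low nibble is log2 of the tx fifo size; high nibble is rx (0xf means a shared fifo) */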
+ hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+ hw_ep->is_shared_fifo = true;
+ continue;
+ } else {
+ hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+ hw_ep->is_shared_fifo = false;
+ }
+
+ /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* pick an RX/TX endpoint for bulk */
+ if (hw_ep->max_packet_sz_tx < 512
+ || hw_ep->max_packet_sz_rx < 512)
+ continue;
+
+ /* REVISIT: this algorithm is lazy; we should at least
+ * try to pick a double buffered endpoint.
+ */
+ if (musb->bulk_ep)
+ continue;
+ musb->bulk_ep = hw_ep;
+#endif
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musb_core_init(u16 musb_type, struct musb *musb)
+{
+#ifdef MUSB_AHB_ID
+ u32 data;
+#endif
+ u8 reg;
+ char *type;
+ u16 hwvers, rev_major, rev_minor;
+ char aInfo[78], aRevision[32], aDate[12];
+ void __iomem *mbase = musb->mregs;
+ int status = 0;
+ int i;
+
+ /* log core options (read using indexed model) */
+ musb_ep_select(mbase, 0);
+ reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+
+ strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+ if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ strcat(aInfo, ", dyn FIFOs");
+ if (reg & MUSB_CONFIGDATA_MPRXE) {
+ strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+ musb->bulk_combine = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_MPTXE) {
+ strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+ musb->bulk_split = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_HBRXE) {
+ strcat(aInfo, ", HB-ISO Rx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_HBTXE) {
+ strcat(aInfo, ", HB-ISO Tx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_SOFTCONE)
+ strcat(aInfo, ", SoftConn");
+
+ printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+ musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+ data = musb_readl(mbase, 0x404);
+ sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
+ (data >> 16) & 0xff, (data >> 24) & 0xff);
+ /* FIXME ID2 and ID3 are unused */
+ data = musb_readl(mbase, 0x408);
+ printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
+ data = musb_readl(mbase, 0x40c);
+ printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
+ reg = musb_readb(mbase, 0x400);
+ musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+ aDate[0] = 0;
+#endif
+ if (MUSB_CONTROLLER_MHDRC == musb_type) {
+ musb->is_multipoint = 1;
+ type = "M";
+ } else {
+ musb->is_multipoint = 0;
+ type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
+ printk(KERN_ERR
+ "%s: kernel must blacklist external hubs\n",
+ musb_driver_name);
+#endif
+#endif
+ }
+
+ /* log release info */
+ hwvers = musb_readw(mbase, MUSB_HWVERS);
+ rev_major = (hwvers >> 10) & 0x1f;
+ rev_minor = hwvers & 0x3ff;
+ snprintf(aRevision, 32, "%d.%d%s", rev_major,
+ rev_minor, (hwvers & 0x8000) ? "RC" : "");
+ printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+ musb_driver_name, type, aRevision, aDate);
+
+ /* configure ep0 */
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+
+ /* discover endpoint configuration */
+ musb->nr_endpoints = 1;
+ musb->epmask = 1;
+
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+ if (musb->config->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else {
+ ERR("reconfigure software for Dynamic FIFOs\n");
+ status = -ENODEV;
+ }
+ } else {
+ if (!musb->config->dyn_fifo)
+ status = ep_config_from_hw(musb);
+ else {
+ ERR("reconfigure software for static FIFOs\n");
+ return -ENODEV;
+ }
+ }
+
+ if (status < 0)
+ return status;
+
+ /* finish init, and print endpoint config */
+ for (i = 0; i < musb->nr_endpoints; i++) {
+ struct musb_hw_ep *hw_ep = musb->endpoints + i;
+
+ hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+#ifdef CONFIG_USB_TUSB6010
+ hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync_va =
+ musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+
+ if (i == 0)
+ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+ else
+ hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+#endif
+
+ hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+ hw_ep->rx_reinit = 1;
+ hw_ep->tx_reinit = 1;
+#endif
+
+ if (hw_ep->max_packet_sz_tx) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ hw_ep->is_shared_fifo ? "shared" : "tx",
+ hw_ep->tx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_tx);
+ }
+ if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ "rx",
+ hw_ep->rx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_rx);
+ }
+ if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+ DBG(1, "hw_ep %d not configured\n", i);
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get spurious IRQs on g_ep0
+ * not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "spurious?\n");
+
+ return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt NULL
+#endif
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect: other
+ * irq sources (phy, dma, etc) will be handled first, musb->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+ irqreturn_t retval = IRQ_NONE;
+ u8 devctl, power;
+ int ep_num;
+ u32 reg;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ power = musb_readb(musb->mregs, MUSB_POWER);
+
+ DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+ (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+ /* the core can interrupt us for multiple reasons; docs have
+ * a generic interrupt flowchart to follow
+ */
+ if (musb->int_usb & STAGE0_MASK)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl, power);
+
+ /* "stage 1" is handling endpoint irqs */
+
+ /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (devctl & MUSB_DEVCTL_HM)
+ retval |= musb_h_ep0_irq(musb);
+ else
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+ /* RX on endpoints 1-15 */
+ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, ep_num);
+ }
+ }
+
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* TX on endpoints 1-15 */
+ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, ep_num);
+ }
+ }
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* finish handling "global" interrupts after handling fifos */
+ if (musb->int_usb)
+ retval |= musb_stage2_irq(musb,
+ musb->int_usb, devctl, power);
+
+ return retval;
+}
+
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static int __initdata use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ /* called with controller lock already held */
+
+ if (!epnum) {
+#ifndef CONFIG_USB_TUSB_OMAP_DMA
+ if (!is_cppi_enabled()) {
+ /* endpoint 0 */
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+ }
+#endif
+ } else {
+ /* endpoints 1..15 */
+ if (transmit) {
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, epnum);
+ }
+ } else {
+ /* receive */
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, epnum);
+ }
+ }
+ }
+}
+
+#else
+#define use_dma 0
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t
+musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = sprintf(buf, "%s\n", otg_state_string(musb));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static ssize_t
+musb_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ if (!strncmp(buf, "host", 4))
+ musb_platform_set_mode(musb, MUSB_HOST);
+ if (!strncmp(buf, "peripheral", 10))
+ musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ if (!strncmp(buf, "otg", 3))
+ musb_platform_set_mode(musb, MUSB_OTG);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
+
+static ssize_t
+musb_vbus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+
+ if (sscanf(buf, "%lu", &val) < 1) {
+ printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->a_wait_bcon = val;
+ if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+
+static ssize_t
+musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+ int vbus;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ val = musb->a_wait_bcon;
+ vbus = musb_platform_get_vbus_status(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return sprintf(buf, "Vbus %s, timeout %lu\n",
+ vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* Gadget drivers can't know whether a host is connected, but users can;
+ * this attribute lets userspace trigger SRP (Session Request Protocol).
+ */
+static ssize_t
+musb_srp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned short srp;
+
+ if (sscanf(buf, "%hu", &srp) != 1
+ || (srp != 1)) {
+ printk(KERN_ERR "SRP: Value must be 1\n");
+ return -EINVAL;
+ }
+
+ if (srp == 1)
+ musb_g_wakeup(musb);
+
+ return n;
+}
+static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+
+#endif /* sysfs */
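+
+/* Userspace view of the attributes above (illustrative only; the exact
+ * sysfs path depends on how the platform device is named on the board):
+ *
+ *	cat .../mode		shows the current OTG state
+ *	echo host > .../mode	asks the glue layer to switch roles
+ *	echo 5000 > .../vbus	sets the VBUS (a_wait_bcon) timeout, in msec
+ *	echo 1 > .../srp	triggers SRP (gadget-enabled builds only)
+ */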
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, irq_work);
+ static int old_state;
+
+ if (musb->xceiv.state != old_state) {
+ old_state = musb->xceiv.state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__init
+allocate_instance(struct device *dev,
+ struct musb_hdrc_config *config, void __iomem *mbase)
+{
+ struct musb *musb;
+ struct musb_hw_ep *ep;
+ int epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct usb_hcd *hcd;
+
+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+ if (!hcd)
+ return NULL;
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+ musb = hcd_to_musb(hcd);
+ INIT_LIST_HEAD(&musb->control);
+ INIT_LIST_HEAD(&musb->in_bulk);
+ INIT_LIST_HEAD(&musb->out_bulk);
+
+ hcd->uses_new_polling = 1;
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+#else
+ musb = kzalloc(sizeof *musb, GFP_KERNEL);
+ if (!musb)
+ return NULL;
+ dev_set_drvdata(dev, musb);
+
+#endif
+
+ musb->mregs = mbase;
+ musb->ctrl_base = mbase;
+ musb->nIrq = -ENODEV;
+ musb->config = config;
+ for (epnum = 0, ep = musb->endpoints;
+ epnum < musb->config->num_eps;
+ epnum++, ep++) {
+
+ ep->musb = musb;
+ ep->epnum = epnum;
+ }
+
+ musb->controller = dev;
+ return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+ /* this has multiple entry modes. it handles fault cleanup after
+ * probe(), where things may be partially set up, as well as rmmod
+ * cleanup after everything's been de-activated.
+ */
+
+#ifdef CONFIG_SYSFS
+ device_remove_file(musb->controller, &dev_attr_mode);
+ device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ musb_gadget_cleanup(musb);
+#endif
+
+ if (musb->nIrq >= 0) {
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ (void) c->stop(c);
+ dma_controller_destroy(c);
+ }
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ if (musb->clock) {
+ clk_disable(musb->clock);
+ clk_put(musb->clock);
+ }
+
+#ifdef CONFIG_USB_MUSB_OTG
+ put_device(musb->xceiv.dev);
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ usb_put_hcd(musb_to_hcd(musb));
+#else
+ kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: IRQ number
+ * @ctrl: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+static int __init
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+ int status;
+ struct musb *musb;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+ */
+ if (!plat) {
+ dev_dbg(dev, "no platform_data?\n");
+ return -ENODEV;
+ }
+ switch (plat->mode) {
+ case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+ break;
+#else
+bad_config:
+#endif
+ default:
+ dev_err(dev, "incompatible Kconfig role setting\n");
+ return -EINVAL;
+ }
+
+ /* allocate */
+ musb = allocate_instance(dev, plat->config, ctrl);
+ if (!musb)
+ return -ENOMEM;
+
+ spin_lock_init(&musb->lock);
+ musb->board_mode = plat->mode;
+ musb->board_set_power = plat->set_power;
+ musb->set_clock = plat->set_clock;
+ musb->min_power = plat->min_power;
+
+ /* Clock usage is chip-specific ... functional clock (DaVinci,
+ * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
+ * code does is make sure a clock handle is available; platform
+ * code manages it during start/stop and suspend/resume.
+ */
+ if (plat->clock) {
+ musb->clock = clk_get(dev, plat->clock);
+ if (IS_ERR(musb->clock)) {
+ status = PTR_ERR(musb->clock);
+ musb->clock = NULL;
+ goto fail;
+ }
+ }
+
+ /* assume vbus is off */
+
+ /* platform adjusts musb->mregs and musb->isr if needed,
+ * and activates clocks
+ */
+ musb->isr = generic_interrupt;
+ status = musb_platform_init(musb);
+
+ if (status < 0)
+ goto fail;
+ if (!musb->isr) {
+ status = -ENODEV;
+ goto fail2;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (use_dma && dev->dma_mask) {
+ struct dma_controller *c;
+
+ c = dma_controller_create(musb, musb->mregs);
+ musb->dma_controller = c;
+ if (c)
+ (void) c->start(c);
+ }
+#endif
+ /* ideally this would be abstracted in platform setup */
+ if (!is_dma_capable() || !musb->dma_controller)
+ dev->dma_mask = NULL;
+
+ /* be sure interrupts are disabled before connecting ISR */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->config->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+ : MUSB_CONTROLLER_HDRC, musb);
+ if (status < 0)
+ goto fail2;
+
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+
+ /* attach to the IRQ */
+ if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+ status = -ENODEV;
+ goto fail2;
+ }
+ musb->nIrq = nIrq;
+/* FIXME this handles wakeup irqs wrong */
+ if (enable_irq_wake(nIrq) == 0)
+ device_init_wakeup(dev, 1);
+
+ pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+ musb_driver_name,
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* host side needs more setup, except for no-host modes */
+ if (musb->board_mode != MUSB_PERIPHERAL) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ if (musb->board_mode == MUSB_OTG)
+ hcd->self.otg_port = 1;
+ musb->xceiv.host = &hcd->self;
+ hcd->power_budget = 2 * (plat->power ? : 250);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* For the host-only role, we can activate right away.
+ * (We expect the ID pin to be forcibly grounded!!)
+ * Otherwise, wait till the gadget driver hooks up.
+ */
+ if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+
+ status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (status)
+ goto fail;
+
+ DBG(1, "%s mode, status %d, devctl %02x %c\n",
+ "HOST", status,
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ (musb_readb(musb->mregs, MUSB_DEVCTL)
+ & MUSB_DEVCTL_BDEVICE
+ ? 'B' : 'A'));
+
+ } else /* peripheral is enabled */ {
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+
+ status = musb_gadget_setup(musb);
+ if (status)
+ goto fail;
+
+ DBG(1, "%s mode, status %d, dev%02x\n",
+ is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
+ status,
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
+ }
+
+#ifdef CONFIG_SYSFS
+ status = device_create_file(dev, &dev_attr_mode);
+ status = device_create_file(dev, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ status = device_create_file(dev, &dev_attr_srp);
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+ status = 0;
+#endif
+
+ return status;
+
+fail:
+ if (musb->clock)
+ clk_put(musb->clock);
+ device_init_wakeup(dev, 0);
+ musb_free(musb);
+ return status;
+
+fail2:
+ musb_platform_exit(musb);
+ goto fail;
+}
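+
+/* For reference: the platform_data consumed above comes from board code.
+ * A minimal sketch, with purely illustrative values (real boards also
+ * fill in a clock name, set_power/set_clock hooks, the power budget, etc):
+ *
+ *	static struct musb_hdrc_config example_config = {
+ *	.multipoint = true,
+ *	.dyn_fifo = true,
+ *	.num_eps = 16,
+ *	.ram_bits = 12,
+ *	};
+ *
+ *	static struct musb_hdrc_platform_data example_pdata = {
+ *	.mode = MUSB_OTG,
+ *	.config = &example_config,
+ *	};
+ */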
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static u64 *orig_dma_mask;
+#endif
+
+static int __init musb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq(pdev, 0);
+ struct resource *iomem;
+ void __iomem *base;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || irq == 0)
+ return -ENODEV;
+
+ base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ if (!base) {
+ dev_err(dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ /* clobbered by use_dma=n */
+ orig_dma_mask = dev->dma_mask;
+#endif
+ return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ void __iomem *ctrl_base = musb->ctrl_base;
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+ * - Peripheral mode: peripheral is deactivated (or never-activated)
+ * - OTG mode: both roles are deactivated (or never-activated)
+ */
+ musb_shutdown(pdev);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (musb->board_mode == MUSB_HOST)
+ usb_remove_hcd(musb_to_hcd(musb));
+#endif
+ musb_free(musb);
+ iounmap(ctrl_base);
+ device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSB_PIO_ONLY
+ pdev->dev.dma_mask = orig_dma_mask;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (is_peripheral_active(musb)) {
+ /* FIXME force disconnect unless we know USB will wake
+ * the system up quickly enough to respond ...
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+ * they will even be wakeup-enabled.
+ */
+ }
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ /* for static cmos like DaVinci, register values were preserved
+ * unless for some reason the whole soc powered down and we're
+ * not treating that as a whole-system restart (e.g. swsusp)
+ */
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+#else
+#define musb_suspend NULL
+#define musb_resume NULL
+#endif
+
+static struct platform_driver musb_driver = {
+ .driver = {
+ .name = (char *)musb_driver_name,
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(musb_remove),
+ .shutdown = musb_shutdown,
+ .suspend = musb_suspend,
+ .resume = musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (usb_disabled())
+ return 0;
+#endif
+
+ pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_MUSB_PIO_ONLY
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d\n",
+ musb_driver_name, debug);
+ return platform_driver_probe(&musb_driver, musb_probe);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+ platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 0000000..8222725
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,488 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#include "musb_io.h"
+#include "musb_regs.h"
+
+#include "musb_gadget.h"
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
+#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
+#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
+
+/* NOTE: otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m) (!(m)->is_host)
+#define is_host_active(m) ((m)->is_host)
+
+#else
+#define is_peripheral_enabled(musb) is_peripheral_capable()
+#define is_host_enabled(musb) is_host_capable()
+#define is_otg_enabled(musb) 0
+
+#define is_peripheral_active(musb) is_peripheral_capable()
+#define is_host_active(musb) is_host_capable()
+#endif
+
+#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
+/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
+ * override that choice selection (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+#endif /* need MUSB gadget selection */
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#define is_peripheral_capable() (1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define is_peripheral_capable() (0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_wakeup(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#define is_host_capable() (1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define is_host_capable() (0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+ MUSB_EP0_IDLE,
+ MUSB_EP0_START, /* expect ack of setup */
+ MUSB_EP0_IN, /* expect IN DATA */
+ MUSB_EP0_OUT, /* expect ack of OUT DATA */
+ MUSB_EP0_STATUS, /* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+ MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
+ MUSB_EP0_STAGE_TX, /* IN data */
+ MUSB_EP0_STAGE_RX, /* OUT data */
+ MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
+ MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
+ MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* OTG protocol constants */
+#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
+#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
+#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+ || defined(CONFIG_ARCH_OMAP3430)
+/* REVISIT indexed access seemed to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if defined(CONFIG_USB_TUSB6010)
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif defined(MUSB_FLAT_REG)
+#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
+#endif
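+
+/* Usage sketch (illustrative): callers look the same under all three
+ * mappings. With the indexed model the select picks the register bank;
+ * with the flat model it is a no-op and the offset macro does the work.
+ *
+ *	musb_ep_select(mbase, epnum);
+ *	csr = musb_readw(mbase + MUSB_EP_OFFSET(epnum, 0), MUSB_TXCSR);
+ *
+ * This mirrors how musb_core_init() computes hw_ep->regs.
+ */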
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+ { (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+ { (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+ (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+ struct musb *musb;
+ void __iomem *fifo;
+ void __iomem *regs;
+
+#ifdef CONFIG_USB_TUSB6010
+ void __iomem *conf;
+#endif
+
+ /* index in musb->endpoints[] */
+ u8 epnum;
+
+ /* hardware configuration, possibly dynamic */
+ bool is_shared_fifo;
+ bool tx_double_buffered;
+ bool rx_double_buffered;
+ u16 max_packet_sz_tx;
+ u16 max_packet_sz_rx;
+
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+ /* TUSB has "asynchronous" and "synchronous" dma modes */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+ void __iomem *fifo_sync_va;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ void __iomem *target_regs;
+
+ /* currently scheduled peripheral endpoint */
+ struct musb_qh *in_qh;
+ struct musb_qh *out_qh;
+
+ u8 rx_reinit;
+ u8 tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* peripheral side */
+ struct musb_ep ep_in; /* TX */
+ struct musb_ep ep_out; /* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_in);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_out);
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+ /* device lock */
+ spinlock_t lock;
+ struct clk *clock;
+ irqreturn_t (*isr)(int, void *);
+ struct work_struct irq_work;
+
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME (1 << 31)
+
+ u32 port1_status;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ unsigned long rh_timer;
+
+ enum musb_h_ep0_state ep0_stage;
+
+ /* bulk traffic normally dedicates endpoint hardware, and each
+ * direction has its own ring of host side endpoints.
+ * we try to progress the transfer at the head of each endpoint's
+ * queue until it completes or NAKs too much; then we try the next
+ * endpoint.
+ */
+ struct musb_hw_ep *bulk_ep;
+
+ struct list_head control; /* of musb_qh */
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+ struct musb_qh *periodic[32]; /* tree of interrupt+iso */
+#endif
+
+ /* called with IRQs blocked; ON/nonzero implies starting a session,
+ * and waiting at least a_wait_vrise_tmout.
+ */
+ void (*board_set_vbus)(struct musb *, int is_on);
+
+ struct dma_controller *dma_controller;
+
+ struct device *controller;
+ void __iomem *ctrl_base;
+ void __iomem *mregs;
+
+#ifdef CONFIG_USB_TUSB6010
+ dma_addr_t async;
+ dma_addr_t sync;
+ void __iomem *sync_va;
+#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+ u16 int_rx;
+ u16 int_tx;
+
+ struct otg_transceiver xceiv;
+
+ int nIrq;
+
+ struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
+#define control_ep endpoints
+
+#define VBUSERR_RETRY_COUNT 3
+ u16 vbuserr_retry;
+ u16 epmask;
+ u8 nr_endpoints;
+
+ u8 board_mode; /* enum musb_mode */
+ int (*board_set_power)(int state);
+
+ int (*set_clock)(struct clk *clk, int is_active);
+
+ u8 min_power; /* vbus for periph, in mA/2 */
+
+ bool is_host;
+
+ int a_wait_bcon; /* VBUS timeout in msecs */
+ unsigned long idle_timeout; /* Next timeout in jiffies */
+
+ /* active means connected and not suspended */
+ unsigned is_active:1;
+
+ unsigned is_multipoint:1;
+ unsigned ignore_disconnect:1; /* during bus resets */
+
+#ifdef C_MP_TX
+ unsigned bulk_split:1;
+#define can_bulk_split(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+#else
+#define can_bulk_split(musb, type) 0
+#endif
+
+#ifdef C_MP_RX
+ unsigned bulk_combine:1;
+#define can_bulk_combine(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+#else
+#define can_bulk_combine(musb, type) 0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+
+ /* is_self_powered is reported in device status and the
+ * config descriptor. is_bus_powered means B_PERIPHERAL
+ * draws some VBUS current; both can be true.
+ */
+ unsigned is_self_powered:1;
+ unsigned is_bus_powered:1;
+
+ unsigned set_address:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
+ struct musb_hdrc_config *config;
+
+#ifdef MUSB_CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+#endif
+};
+
+static inline void musb_set_vbus(struct musb *musb, int is_on)
+{
+ musb->board_set_vbus(musb, is_on);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+ return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *musb);
+extern void musb_stop(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
+#else
+#define musb_platform_try_idle(x, y) do {} while (0)
+#endif
+
+#ifdef CONFIG_USB_TUSB6010
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_get_vbus_status(x) 0
+#endif
+
+extern int __init musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 0000000..4d27944
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,62 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+ do { printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+ if (_dbg_level(level)) { \
+ printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); \
+ } } while (0)
+
+extern unsigned debug;
+
+static inline int _dbg_level(unsigned l)
+{
+ return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
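+
+/* Example (illustrative): with the driver-wide debug level >= 3 this
+ * prints the message, tagged with function and line, at KERN_DEBUG;
+ * below that level it is a no-op.
+ *
+ *	DBG(3, "ep%d: txcsr %04x\n", epnum, csr);
+ */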
+
+extern const char *otg_state_string(struct musb *);
+
+#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 0000000..0a2c4e3
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores. On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does. Responsibilities
+ * of a DMA controller driver include:
+ *
+ * - Handling the details of moving multiple USB packets
+ * in cooperation with the Inventra USB core, including especially
+ * the correct RX side treatment of short packets and buffer-full
+ * states (both of which terminate transfers).
+ *
+ * - Knowing the correlation between dma channels and the
+ * Inventra core's local endpoint resources and data direction.
+ *
+ * - Maintaining a list of allocated/available channels.
+ *
+ * - Updating channel status on interrupts,
+ * whether shared with the Inventra core or separate.
+ */
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+#define is_dma_capable() (1)
+#else
+#define is_dma_capable() (0)
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define is_cppi_enabled() 1
+#else
+#define is_cppi_enabled() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+ /* unallocated */
+ MUSB_DMA_STATUS_UNKNOWN,
+ /* allocated ... but not busy, no errors */
+ MUSB_DMA_STATUS_FREE,
+ /* busy ... transactions are active */
+ MUSB_DMA_STATUS_BUSY,
+ /* transaction(s) aborted due to ... dma or memory bus error */
+ MUSB_DMA_STATUS_BUS_ABORT,
+ /* transaction(s) aborted due to ... core error or USB fault */
+ MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ * transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+ void *private_data;
+ /* FIXME not void* private_data, but a dma_controller * */
+ size_t max_len;
+ size_t actual_len;
+ enum dma_channel_status status;
+ bool desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status. If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+ return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ * return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller
+ * return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ * returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+ int (*start)(struct dma_controller *);
+ int (*stop)(struct dma_controller *);
+ struct dma_channel *(*channel_alloc)(struct dma_controller *,
+ struct musb_hw_ep *, u8 is_tx);
+ void (*channel_release)(struct dma_channel *);
+ int (*channel_program)(struct dma_channel *channel,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr,
+ u32 length);
+ int (*channel_abort)(struct dma_channel *);
+};
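+
+/* Sketch of the expected call flow (illustrative only; the gadget and
+ * host transfer code are the real users of these ops):
+ *
+ *	channel = c->channel_alloc(c, hw_ep, is_tx);
+ *	if (channel && c->channel_program(channel, maxpacket, mode,
+ *			dma_addr, length)) {
+ *		... transfer runs; completion shows up via the USB
+ *		interrupt or musb_dma_completion() ...
+ *	} else {
+ *		... fall back to PIO for this request ...
+ *	}
+ */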
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+dma_controller_create(struct musb *, void __iomem *);
+
+extern void dma_controller_destroy(struct dma_controller *);
+
+#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 0000000..d6a802c
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+
+
+/* MUSB PERIPHERAL status 3-mar-2006:
+ *
+ * - EP0 seems solid. It passes both USBCV and usbtest control cases.
+ * Minor glitches:
+ *
+ * + remote wakeup to Linux hosts works, but we saw USBCV failures
+ * in one test run (operator error?)
+ * + endpoint halt tests -- in both usbtest and usbcv -- seem
+ * to break when dma is enabled ... is something wrongly
+ * clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested. Network traffic patterns
+ * (with lots of short transfers etc) need retesting; they turn up the
+ * worst cases of the DMA, since short packets are typical but are not
+ * required.
+ *
+ * - TX/IN
+ * + both pio and dma behave well with network and g_zero tests
+ * + no cppi throughput issues other than no-hw-queueing
+ * + failed with FLAT_REG (DaVinci)
+ * + seems to behave with double buffering, PIO -and- CPPI
+ * + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ * + both pio and dma behave well with network and g_zero tests
+ * + dma is slow in typical case (short_not_ok is clear)
+ * + double buffering ok with PIO
+ * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ * + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored; so
+ * drivers can't hold off host requests until userspace is ready.
+ * (Workaround: they can turn it off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ * + DaVinci, basically works with cppi dma
+ * + OMAP 2430, ditto with mentor dma
+ * + TUSB 6010, platform-specific dma in the works
+ */
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Immediately complete a request.
+ *
+ * @param ep the endpoint whose request is being completed
+ * @param request the request to complete
+ * @param status the status to complete the request with
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+ struct musb_ep *ep,
+ struct usb_request *request,
+ int status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+ struct musb_request *req;
+ struct musb *musb;
+ int busy = ep->busy;
+
+ req = to_musb_request(request);
+
+ list_del(&request->list);
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+ musb = req->musb;
+
+ ep->busy = 1;
+ spin_unlock(&musb->lock);
+ if (is_dma_capable()) {
+ if (req->mapped) {
+ dma_unmap_single(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->request.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ } else if (req->request.dma != DMA_ADDR_INVALID)
+ dma_sync_single_for_cpu(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ }
+ if (request->status == 0)
+ DBG(5, "%s done request %p, %d/%d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length);
+ else
+ DBG(2, "%s request %p, %d/%d fault %d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length,
+ request->status);
+ req->request.complete(&req->ep->end_point, &req->request);
+ spin_lock(&musb->lock);
+ ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint, completing them with the given
+ * status. Synchronous. The caller has locked the controller, blocked
+ * IRQs, and selected this endpoint.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+ struct musb_request *req = NULL;
+ void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+ ep->busy = 1;
+
+ if (is_dma_capable() && ep->dma) {
+ struct dma_controller *c = ep->musb->dma_controller;
+ int value;
+ if (ep->is_in) {
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ } else {
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ }
+
+ value = c->channel_abort(ep->dma);
+ DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+ c->channel_release(ep->dma);
+ ep->dma = NULL;
+ }
+
+ while (!list_empty(&(ep->req_list))) {
+ req = container_of(ep->req_list.next, struct musb_request,
+ request.list);
+ musb_g_giveback(ep, &req->request, status);
+ }
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+ if (can_bulk_split(musb, ep->type))
+ return ep->hw_ep->max_packet_sz_tx;
+ else
+ return ep->packet_sz;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+ Only mode 0 is used for transfers <= wPktSize,
+ mode 1 is used for larger transfers,
+
+ One of the following happens:
+ - Host sends IN token which causes an endpoint interrupt
+ -> TxAvail
+ -> if DMA is currently busy, exit.
+ -> if queue is non-empty, txstate().
+
+ - Request is queued by the gadget driver.
+ -> if queue was previously empty, txstate()
+
+ txstate()
+ -> start
+ /\ -> setup DMA
+ | (data is transferred to the FIFO, then sent out when
+ | IN token(s) are received from the Host)
+ | -> DMA interrupt on completion
+ | calls TxAvail.
+ | -> stop DMA, ~DmaEnab,
+ | -> set TxPktRdy for last short pkt or zlp
+ | -> Complete Request
+ | -> Continue next request (call txstate)
+ |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+ u8 epnum = req->epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct usb_request *request;
+ u16 fifo_count = 0, csr;
+ int use_dma = 0;
+
+ musb_ep = req->ep;
+
+ /* we shouldn't get here while DMA is active ... but we do ... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "dma pending...\n");
+ return;
+ }
+
+ /* read TXCSR before */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ request = &req->request;
+ fifo_count = min(max_ep_writesize(musb, musb_ep),
+ (int)(request->length - request->actual));
+
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ DBG(5, "%s old packet still ready , txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_SENDSTALL) {
+ DBG(5, "%s stalling, txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ epnum, musb_ep->packet_sz, fifo_count,
+ csr);
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ use_dma = (request->dma != DMA_ADDR_INVALID);
+
+ /* MUSB_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ {
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min(request->length,
+ musb_ep->dma->max_len);
+ if (request_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ musb_ep->dma->desired_mode,
+ request->dma, request_size);
+ if (use_dma) {
+ if (musb_ep->dma->desired_mode == 0) {
+ /* ASSERT: DMAENAB is clear */
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_MODE);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_MODE);
+
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR,
+ (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+ | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and Mentor
+ * fifos) just tells CPPI it could start. Data only moves
+ * to the USB TX fifo when both fifos are ready.
+ */
+
+ /* "mode" is irrelevant here; handle terminating ZLPs like
+ * PIO does, since the hardware RNDIS mode seems unreliable
+ * except for the last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma,
+ request->length);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ /* ASSERT: DMAENAB clear */
+ csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ /* invariant: request->buf is non-null */
+ }
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma,
+ request->length);
+#endif
+ }
+#endif
+
+ if (!use_dma) {
+ musb_write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+ csr |= MUSB_TXCSR_TXPKTRDY;
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* host may already have the data when this message shows... */
+ DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ musb_ep->end_point.name, use_dma ? "dma" : "pio",
+ request->actual, request->length,
+ musb_readw(epio, MUSB_TXCSR),
+ fifo_count,
+ musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ, with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ u8 __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+ do {
+ /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ }
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+
+ break;
+ }
+
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* we NAKed, no big deal ... little reason to care */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* SHOULD NOT HAPPEN ... has with cppi though, after
+ * changing SENDSTALL (and other cases); harmless?
+ */
+ DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+ break;
+ }
+
+ if (request) {
+ u8 is_dma = 0;
+
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ is_dma = 1;
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ DBG(4, "TXCSR%d %04x, dma off, "
+ "len %zu, req %p\n",
+ epnum, csr,
+ musb_ep->dma->actual_len,
+ request);
+ }
+
+ if (is_dma || request->actual == request->length) {
+
+ /* First, maybe a terminating short packet.
+ * Some DMA engines might handle this by
+ * themselves.
+ */
+ if ((request->zero
+ && request->length
+ && (request->length
+ % musb_ep->packet_sz)
+ == 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+ || (is_dma &&
+ ((!dma->desired_mode) ||
+ (request->actual &
+ (musb_ep->packet_sz - 1))))
+#endif
+ ) {
+ /* on dma completion, fifo may not
+ * be available yet ...
+ */
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ break;
+
+ DBG(4, "sending zero pkt\n");
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
+ }
+
+ /* ... or if not, then complete it */
+ musb_g_giveback(musb_ep, request, 0);
+
+ /* kickstart next transfer if appropriate;
+ * the packet that just completed might not
+ * be transmitted for hours or days.
+ * REVISIT for double buffering...
+ * FIXME revisit for stalls too...
+ */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ break;
+ request = musb_ep->desc
+ ? next_request(musb_ep)
+ : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ break;
+ }
+ }
+
+ txstate(musb, to_musb_request(request));
+ }
+
+ } while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+ - Only mode 0 is used.
+
+ - Request is queued by the gadget class driver.
+ -> if queue was previously empty, rxstate()
+
+ - Host sends OUT token which causes an endpoint interrupt
+ /\ -> RxReady
+ | -> if request queued, call rxstate
+ | /\ -> setup DMA
+ | | -> DMA interrupt on completion
+ | | -> RxReady
+ | | -> stop DMA
+ | | -> ack the read
+ | | -> if data recd = max expected
+ | | by the request, or host
+ | | sent a short packet,
+ | | complete the request,
+ | | and start the next one.
+ | |_____________________________________|
+ | else just wait for the host
+ | to send the next OUT token.
+ |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+#endif
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+ u16 csr = 0;
+ const u8 epnum = req->epnum;
+ struct usb_request *request = &req->request;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 fifo_count = 0;
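+ /* "len" holds the size of the packet just read; it is preset to
+ * wMaxPacketSize so the short-packet test at the end of this
+ * function stays false until RXCOUNT has actually been read
+ */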
+ u16 len = musb_ep->packet_sz;
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+
+ if (is_cppi_enabled() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+
+ /* NOTE: CPPI won't actually stop advancing the DMA
+ * queue after short packet transfers, so this is almost
+ * always going to run as IRQ-per-packet DMA so that
+ * faults will be handled correctly.
+ */
+ if (c->channel_program(channel,
+ musb_ep->packet_sz,
+ !request->short_not_ok,
+ request->dma + request->actual,
+ request->length - request->actual)) {
+
+ /* make sure that if an rxpkt arrived after the irq,
+ * the cppi engine will be ready to take it as soon
+ * as DMA is enabled
+ */
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAMODE);
+ csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ return;
+ }
+ }
+
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ len = musb_readw(epio, MUSB_RXCOUNT);
+ if (request->actual < request->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+ * request->length is routinely more than what the host sends. For
+ * most of these gadgets, the end of transfer is signified either by a
+ * short packet, or by filling the last byte of the buffer. (Sending
+ * extra data in that last packet should trigger an overflow fault.)
+ * But in mode 1, we don't get a DMA completion interrupt for short
+ * packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
+ csr |= MUSB_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ /* csr |= MUSB_RXCSR_DMAMODE; */
+
+ /* this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+#endif
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request->actual < request->length) {
+ int transfer_size = 0;
+#ifdef USE_MODE1
+ transfer_size = min(request->length,
+ channel->max_len);
+#else
+ transfer_size = len;
+#endif
+ if (transfer_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+ }
+
+ if (use_dma)
+ return;
+ }
+#endif /* Mentor's DMA */
+
+ fifo_count = request->length - request->actual;
+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_ep->end_point.name,
+ len, fifo_count,
+ musb_ep->packet_sz);
+
+ fifo_count = min(len, fifo_count);
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+ if (tusb_dma_omap() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+ u32 dma_addr = request->dma + request->actual;
+ int ret;
+
+ ret = c->channel_program(channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ dma_addr,
+ fifo_count);
+ if (ret)
+ return;
+ }
+#endif
+
+ musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+ request->actual += fifo_count;
+
+ /* REVISIT if we left anything in the fifo, flush
+ * it and report -EOVERFLOW
+ */
+
+ /* ack the read! */
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+ /* reach the end or short packet detected */
+ if (request->actual == request->length || len < musb_ep->packet_sz)
+ musb_g_giveback(musb_ep, request, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ void __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+ DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ csr, dma ? " (dma)" : "", request);
+
+ if (csr & MUSB_RXCSR_P_SENTSTALL) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ request->actual += musb_ep->dma->actual_len;
+ }
+
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+ goto done;
+ }
+
+ if (csr & MUSB_RXCSR_P_OVERRUN) {
+ /* csr |= MUSB_RXCSR_P_WZC_BITS; */
+ csr &= ~MUSB_RXCSR_P_OVERRUN;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
+ if (request && request->status == -EINPROGRESS)
+ request->status = -EOVERFLOW;
+ }
+ if (csr & MUSB_RXCSR_INCOMPRX) {
+ /* REVISIT not necessarily an error */
+ DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* "should not happen"; likely RXPKTRDY pending for DMA */
+ DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
+ "%s busy, csr %04x\n",
+ musb_ep->end_point.name, csr);
+ goto done;
+ }
+
+ if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_P_WZC_BITS | csr);
+
+ request->actual += musb_ep->dma->actual_len;
+
+ DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+ epnum, csr,
+ musb_readw(epio, MUSB_RXCSR),
+ musb_ep->dma->actual_len, request);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+ /* Autoclear doesn't clear RxPktRdy for short packets */
+ if ((dma->desired_mode == 0)
+ || (dma->actual_len
+ & (musb_ep->packet_sz - 1))) {
+ /* ack the read! */
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* incomplete, and not short? wait for next IN packet */
+ if ((request->actual < request->length)
+ && (musb_ep->dma->actual_len
+ == musb_ep->packet_sz))
+ goto done;
+#endif
+ musb_g_giveback(musb_ep, request, 0);
+
+ request = next_request(musb_ep);
+ if (!request)
+ goto done;
+
+ /* don't start more i/o till the stall clears */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_P_SENDSTALL)
+ goto done;
+ }
+
+
+ /* analyze request if the ep is hot */
+ if (request)
+ rxstate(musb, to_musb_request(request));
+ else
+ DBG(3, "packet waiting for %s%s request\n",
+ musb_ep->desc ? "" : "inactive ",
+ musb_ep->end_point.name);
+
+done:
+ return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *hw_ep;
+ void __iomem *regs;
+ struct musb *musb;
+ void __iomem *mbase;
+ u8 epnum;
+ u16 csr;
+ unsigned tmp;
+ int status = -EINVAL;
+
+ if (!ep || !desc)
+ return -EINVAL;
+
+ musb_ep = to_musb_ep(ep);
+ hw_ep = musb_ep->hw_ep;
+ regs = hw_ep->regs;
+ musb = musb_ep->musb;
+ mbase = musb->mregs;
+ epnum = musb_ep->current_epnum;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_ep->desc) {
+ status = -EBUSY;
+ goto fail;
+ }
+ musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* check direction and (later) maxpacket size against endpoint */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
+ goto fail;
+
+ /* REVISIT this rules out high bandwidth periodic transfers */
+ tmp = le16_to_cpu(desc->wMaxPacketSize);
+ if (tmp & ~0x07ff)
+ goto fail;
+ musb_ep->packet_sz = tmp;
+
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+ musb_ep_select(mbase, epnum);
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 1;
+ if (!musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_tx)
+ goto fail;
+
+ int_txe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* REVISIT if can_bulk_split(), use by updating "tmp";
+ * likewise high bandwidth periodic tx
+ */
+ musb_writew(regs, MUSB_TXMAXP, tmp);
+
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+ if (musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_TXCSR_P_ISO;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(regs, MUSB_TXCSR, csr);
+
+ } else {
+ u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 0;
+ if (musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_rx)
+ goto fail;
+
+ int_rxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+
+ /* REVISIT if can_bulk_combine() use by updating "tmp"
+ * likewise high bandwidth periodic rx
+ */
+ musb_writew(regs, MUSB_RXMAXP, tmp);
+
+ /* force shared fifo to OUT-only mode */
+ if (hw_ep->is_shared_fifo) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ }
+
+ csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_RXCSR_P_ISO;
+ else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_RXCSR, csr);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* NOTE: all the I/O code _should_ work fine without DMA, in case
+ * for some reason you run out of channels here.
+ */
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep->dma = c->channel_alloc(c, hw_ep,
+ (desc->bEndpointAddress & USB_DIR_IN));
+ } else
+ musb_ep->dma = NULL;
+
+ musb_ep->desc = desc;
+ musb_ep->busy = 0;
+ status = 0;
+
+ pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+ musb_driver_name, musb_ep->end_point.name,
+ ({ char *s; switch (musb_ep->type) {
+ case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
+ case USB_ENDPOINT_XFER_INT: s = "int"; break;
+ default: s = "iso"; break;
+ }; s; }),
+ musb_ep->is_in ? "IN" : "OUT",
+ musb_ep->dma ? "dma, " : "",
+ musb_ep->packet_sz);
+
+ schedule_work(&musb->irq_work);
+
+fail:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
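+
+#if 0
+/* Illustrative sketch, not part of this driver: a gadget function driver
+ * reaches musb_gadget_enable() above through the generic usb_ep_enable()
+ * wrapper from <linux/usb/gadget.h>, typically while handling
+ * SET_CONFIGURATION or SET_INTERFACE. "ep" and "desc" are hypothetical
+ * names for an endpoint the function driver claimed and its descriptor.
+ */
+static int example_enable_ep(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ int status;
+
+ status = usb_ep_enable(ep, desc);
+ if (status < 0)
+ return status; /* e.g. -EBUSY or -EINVAL from the code above */
+ ep->driver_data = NULL; /* function drivers usually stash state here */
+ return 0;
+}
+#endif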
+
+/*
+ * Disable an endpoint flushing all requests queued.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+ unsigned long flags;
+ struct musb *musb;
+ u8 epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio;
+ int status = 0;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+ epnum = musb_ep->current_epnum;
+ epio = musb->endpoints[epnum].regs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(musb->mregs, epnum);
+
+ /* zero the endpoint sizes */
+ if (musb_ep->is_in) {
+ u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
+ int_txe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+ musb_writew(epio, MUSB_TXMAXP, 0);
+ } else {
+ u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
+ int_rxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+ musb_writew(epio, MUSB_RXMAXP, 0);
+ }
+
+ musb_ep->desc = NULL;
+
+ /* abort all pending DMA and requests */
+ nuke(musb_ep, -ESHUTDOWN);
+
+ schedule_work(&musb->irq_work);
+
+ spin_unlock_irqrestore(&(musb->lock), flags);
+
+ DBG(2, "%s\n", musb_ep->end_point.name);
+
+ return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *request = NULL;
+
+ request = kzalloc(sizeof *request, gfp_flags);
+ if (!request)
+ return NULL;
+
+ INIT_LIST_HEAD(&request->request.list);
+ request->request.dma = DMA_ADDR_INVALID;
+ request->epnum = musb_ep->current_epnum;
+ request->ep = musb_ep;
+
+ return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_musb_request(req));
+}
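+
+#if 0
+/* Illustrative sketch, not part of this driver: how a gadget function
+ * driver typically uses the two hooks above through the generic
+ * usb_ep_*() wrappers. The endpoint "ep", the buffer and my_complete()
+ * are hypothetical.
+ */
+static void my_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* runs from musb_g_giveback(); req->status and req->actual are valid */
+}
+
+static int example_submit(struct usb_ep *ep, void *buf, unsigned length)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->buf = buf;
+ req->length = length;
+ req->complete = my_complete;
+ return usb_ep_queue(ep, req, GFP_ATOMIC);
+ /* usb_ep_free_request(ep, req) once the request is no longer needed */
+}
+#endif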
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+ struct list_head list;
+ struct device *dev;
+ unsigned bytes;
+ dma_addr_t dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+ DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+ req->tx ? "TX/IN" : "RX/OUT",
+ &req->request, req->request.length, req->epnum);
+
+ musb_ep_select(musb->mregs, req->epnum);
+ if (req->tx)
+ txstate(musb, req);
+ else
+ rxstate(musb, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep;
+ struct musb_request *request;
+ struct musb *musb;
+ int status = 0;
+ unsigned long lockflags;
+
+ if (!ep || !req)
+ return -EINVAL;
+ if (!req->buf)
+ return -ENODATA;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+
+ request = to_musb_request(req);
+ request->musb = musb;
+
+ if (request->ep != musb_ep)
+ return -EINVAL;
+
+ DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+ /* request is mine now... */
+ request->request.actual = 0;
+ request->request.status = -EINPROGRESS;
+ request->epnum = musb_ep->current_epnum;
+ request->tx = musb_ep->is_in;
+
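+ /* map (or re-sync) the buffer for DMA; PIO-only endpoints skip this
+ * and copy through the FIFO in txstate()/rxstate() instead
+ */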
+ if (is_dma_capable() && musb_ep->dma) {
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ request->request.dma = dma_map_single(
+ musb->controller,
+ request->request.buf,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 1;
+ } else {
+ dma_sync_single_for_device(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 0;
+ }
+ } else if (!req->buf) {
+ return -ENODATA;
+ } else
+ request->mapped = 0;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ /* don't queue if the ep is down */
+ if (!musb_ep->desc) {
+ DBG(4, "req %p queued to %s while ep %s\n",
+ req, ep->name, "disabled");
+ status = -ESHUTDOWN;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(request->request.list), &(musb_ep->req_list));
+
+ /* if this is the head of the queue, start i/o ... */
+ if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+ musb_ep_restart(musb, request);
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct usb_request *r;
+ unsigned long flags;
+ int status = 0;
+ struct musb *musb = musb_ep->musb;
+
+ if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+ return -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ list_for_each_entry(r, &musb_ep->req_list, list) {
+ if (r == request)
+ break;
+ }
+ if (r != request) {
+ DBG(3, "request %p not queued to %s\n", request, ep->name);
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* if the hardware doesn't have the request, easy ... */
+ if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+ /* ... else abort the dma transfer ... */
+ else if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep_select(musb->mregs, musb_ep->current_epnum);
+ if (c->channel_abort)
+ status = c->channel_abort(musb_ep->dma);
+ else
+ status = -EBUSY;
+ if (status == 0)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ } else {
+ /* NOTE: by sticking to easily tested hardware/driver states,
+ * we leave counting of in-flight packets imprecise.
+ */
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ }
+
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data but will queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ u8 epnum = musb_ep->current_epnum;
+ struct musb *musb = musb_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr;
+ struct musb_request *request = NULL;
+ int status = 0;
+
+ if (!ep)
+ return -EINVAL;
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ musb_ep_select(mbase, epnum);
+
+ /* cannot portably stall with non-empty FIFO */
+ request = to_musb_request(next_request(musb_ep));
+ if (value && musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -EAGAIN;
+ }
+
+ }
+
+ /* set/clear the stall and toggle bits */
+ DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_WZC_BITS
+ | MUSB_TXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_TXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_P_SENTSTALL);
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_WZC_BITS
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_RXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+done:
+
+ /* maybe start the first request in the queue */
+ if (!musb_ep->busy && !value && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ void __iomem *epio = musb_ep->hw_ep->regs;
+ int retval = -EINVAL;
+
+ if (musb_ep->desc && !musb_ep->is_in) {
+ struct musb *musb = musb_ep->musb;
+ int epnum = musb_ep->current_epnum;
+ void __iomem *mbase = musb->mregs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb_ep_select(mbase, epnum);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = musb_readw(epio, MUSB_RXCOUNT);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb = musb_ep->musb;
+ u8 epnum = musb_ep->current_epnum;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr, int_txe;
+
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(mbase, (u8) epnum);
+
+ /* disable interrupts */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
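+ /* write twice, as elsewhere, in case of double buffering */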
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+ .enable = musb_gadget_enable,
+ .disable = musb_gadget_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_gadget_queue,
+ .dequeue = musb_gadget_dequeue,
+ .set_halt = musb_gadget_set_halt,
+ .fifo_status = musb_gadget_fifo_status,
+ .fifo_flush = musb_gadget_fifo_flush
+};
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ void __iomem *mregs = musb->mregs;
+ unsigned long flags;
+ int status = -EINVAL;
+ u8 power, devctl;
+ int retries;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ /* NOTE: OTG state machine doesn't include B_SUSPENDED;
+ * that's part of the standard usb 1.1 state machine, and
+ * doesn't affect OTG transitions.
+ */
+ if (musb->may_wakeup && musb->is_suspended)
+ break;
+ goto done;
+ case OTG_STATE_B_IDLE:
+ /* Start SRP ... OTG not required. */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mregs, MUSB_DEVCTL, devctl);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
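+ /* busy-wait briefly for SESSION to latch, then for the core to
+ * drop it again once SRP completes (or times out)
+ */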
+ retries = 100;
+ while (!(devctl & MUSB_DEVCTL_SESSION)) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+ retries = 10000;
+ while (devctl & MUSB_DEVCTL_SESSION) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+
+ /* Block idling for at least 1s */
+ musb_platform_try_idle(musb,
+ jiffies + msecs_to_jiffies(1000));
+
+ status = 0;
+ goto done;
+ default:
+ DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+ goto done;
+ }
+
+ status = 0;
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+ DBG(2, "issue wakeup\n");
+
+ /* FIXME do this next chunk in a timer callback, no udelay */
+ mdelay(2);
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ musb->is_self_powered = !!is_selfpowered;
+ return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ if (is_on)
+ power |= MUSB_POWER_SOFTCONN;
+ else
+ power &= ~MUSB_POWER_SOFTCONN;
+
+ /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+ DBG(3, "gadget %s D+ pullup %s\n",
+ musb->gadget_driver->function, is_on ? "on" : "off");
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ DBG(2, "<= %s =>\n", __func__);
+
+ /*
+ * FIXME iff driver's softconnect flag is set (as it is during probe,
+ * though that can clear it), just musb_pullup().
+ */
+
+ return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ if (!musb->xceiv.set_power)
+ return -EOPNOTSUPP;
+ return otg_set_power(&musb->xceiv, mA);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ /* NOTE: this assumes we are sensing vbus; we'd rather
+ * not pullup unless the B-session is active.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (is_on != musb->softconnect) {
+ musb->softconnect = is_on;
+ musb_pullup(musb, is_on);
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+ .get_frame = musb_gadget_get_frame,
+ .wakeup = musb_gadget_wakeup,
+ .set_selfpowered = musb_gadget_set_self_powered,
+ /* .vbus_session = musb_gadget_vbus_session, */
+ .vbus_draw = musb_gadget_vbus_draw,
+ .pullup = musb_gadget_pullup,
+};
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port. It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+ /* kref_put(WHAT) */
+ dev_dbg(dev, "%s\n", __func__);
+}
+
+
+static void __init
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+
+ memset(ep, 0, sizeof *ep);
+
+ ep->current_epnum = epnum;
+ ep->musb = musb;
+ ep->hw_ep = hw_ep;
+ ep->is_in = is_in;
+
+ INIT_LIST_HEAD(&ep->req_list);
+
+ sprintf(ep->name, "ep%d%s", epnum,
+ (!epnum || hw_ep->is_shared_fifo) ? "" : (
+ is_in ? "in" : "out"));
+ ep->end_point.name = ep->name;
+ INIT_LIST_HEAD(&ep->end_point.ep_list);
+ if (!epnum) {
+ ep->end_point.maxpacket = 64;
+ ep->end_point.ops = &musb_g_ep0_ops;
+ musb->g.ep0 = &ep->end_point;
+ } else {
+ if (is_in)
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+ else
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+ ep->end_point.ops = &musb_ep_ops;
+ list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+ }
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __init musb_g_init_endpoints(struct musb *musb)
+{
+ u8 epnum;
+ struct musb_hw_ep *hw_ep;
+ unsigned count = 0;
+
+ /* initialize endpoint list just once */
+ INIT_LIST_HEAD(&(musb->g.ep_list));
+
+ for (epnum = 0, hw_ep = musb->endpoints;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+ count++;
+ } else {
+ if (hw_ep->max_packet_sz_tx) {
+ init_peripheral_ep(musb, &hw_ep->ep_in,
+ epnum, 1);
+ count++;
+ }
+ if (hw_ep->max_packet_sz_rx) {
+ init_peripheral_ep(musb, &hw_ep->ep_out,
+ epnum, 0);
+ count++;
+ }
+ }
+ }
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __init musb_gadget_setup(struct musb *musb)
+{
+ int status;
+
+ /* REVISIT minor race: if (erroneously) setting up two
+ * musb peripherals at the same time, only the bus lock
+ * is probably held.
+ */
+ if (the_gadget)
+ return -EBUSY;
+ the_gadget = musb;
+
+ musb->g.ops = &musb_gadget_operations;
+ musb->g.is_dualspeed = 1;
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* this "gadget" abstracts/virtualizes the controller */
+ strcpy(musb->g.dev.bus_id, "gadget");
+ musb->g.dev.parent = musb->controller;
+ musb->g.dev.dma_mask = musb->controller->dma_mask;
+ musb->g.dev.release = musb_gadget_release;
+ musb->g.name = musb_driver_name;
+
+ if (is_otg_enabled(musb))
+ musb->g.is_otg = 1;
+
+ musb_g_init_endpoints(musb);
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+
+ status = device_register(&musb->g.dev);
+ if (status != 0)
+ the_gadget = NULL;
+ return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+ if (musb != the_gadget)
+ return;
+
+ device_unregister(&musb->g.dev);
+ the_gadget = NULL;
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL something went wrong (not driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ int retval;
+ unsigned long flags;
+ struct musb *musb = the_gadget;
+
+ if (!driver
+ || driver->speed != USB_SPEED_HIGH
+ || !driver->bind
+ || !driver->setup)
+ return -EINVAL;
+
+ /* driver must be initialized to support peripheral mode */
+ if (!musb || !(musb->board_mode == MUSB_OTG
+ || musb->board_mode == MUSB_PERIPHERAL)) {
+ DBG(1, "%s, no dev??\n", __func__);
+ return -ENODEV;
+ }
+
+ DBG(3, "registering driver %s\n", driver->function);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->gadget_driver) {
+ DBG(1, "%s is already bound to %s\n",
+ musb_driver_name,
+ musb->gadget_driver->driver.name);
+ retval = -EBUSY;
+ } else {
+ musb->gadget_driver = driver;
+ musb->g.dev.driver = &driver->driver;
+ driver->driver.bus = NULL;
+ musb->softconnect = 1;
+ retval = 0;
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (retval == 0) {
+ retval = driver->bind(&musb->g);
+ if (retval != 0) {
+ DBG(3, "bind to driver %s failed --> %d\n",
+ driver->driver.name, retval);
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* REVISIT always use otg_set_peripheral(), handling
+ * issues including the root hub one below ...
+ */
+ musb->xceiv.gadget = &musb->g;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->is_active = 1;
+
+ /* FIXME this ignores the softconnect flag. Drivers are
+ * allowed to hold the peripheral inactive until, for example,
+ * userspace hooks up printer hardware or DSP codecs, so
+ * hosts only see fully functional devices.
+ */
+
+ if (!is_otg_enabled(musb))
+ musb_start(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb)) {
+ DBG(3, "OTG startup...\n");
+
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (retval < 0) {
+ DBG(1, "add_hcd failed, %d\n", retval);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->xceiv.gadget = NULL;
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ }
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
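+
+#if 0
+/* Illustrative sketch, not part of this driver: the skeleton of a gadget
+ * driver binding to this controller. All names are hypothetical; note
+ * that the checks above require driver->speed == USB_SPEED_HIGH and both
+ * bind() and setup() to be provided.
+ */
+static int example_bind(struct usb_gadget *gadget)
+{
+ /* claim endpoints from gadget->ep_list, allocate requests, ... */
+ return 0;
+}
+
+static void example_unbind(struct usb_gadget *gadget)
+{
+}
+
+static int example_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ /* ep0 requests delegated by musb_gadget_ep0.c arrive here */
+ return -EOPNOTSUPP;
+}
+
+static void example_disconnect(struct usb_gadget *gadget)
+{
+}
+
+static struct usb_gadget_driver example_driver = {
+ .function = "example",
+ .speed = USB_SPEED_HIGH,
+ .bind = example_bind,
+ .unbind = example_unbind,
+ .setup = example_setup,
+ .disconnect = example_disconnect,
+ .driver = {
+ .name = "example",
+ },
+};
+
+/* module init/exit would call usb_gadget_register_driver(&example_driver)
+ * and usb_gadget_unregister_driver(&example_driver) respectively
+ */
+#endif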
+
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+ int i;
+ struct musb_hw_ep *hw_ep;
+
+ /* don't disconnect if it's not connected */
+ if (musb->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (musb->softconnect) {
+ musb->softconnect = 0;
+ musb_pullup(musb, 0);
+ }
+ musb_stop(musb);
+
+ /* killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ if (driver) {
+ for (i = 0, hw_ep = musb->endpoints;
+ i < musb->nr_endpoints;
+ i++, hw_ep++) {
+ musb_ep_select(musb->mregs, i);
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ } else {
+ if (hw_ep->max_packet_sz_tx)
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ if (hw_ep->max_packet_sz_rx)
+ nuke(&hw_ep->ep_out, -ESHUTDOWN);
+ }
+ }
+
+ spin_unlock(&musb->lock);
+ driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ unsigned long flags;
+ int retval = 0;
+ struct musb *musb = the_gadget;
+
+ if (!driver || !driver->unbind || !musb)
+ return -EINVAL;
+
+ /* REVISIT always use otg_set_peripheral() here too;
+ * this needs to shut down the OTG engine.
+ */
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+ musb_hnp_stop(musb);
+#endif
+
+ if (musb->gadget_driver == driver) {
+
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+
+ DBG(3, "unregistering driver %s\n", driver->function);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ driver->unbind(&musb->g);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+ } else
+ retval = -EINVAL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb) && retval == 0) {
+ usb_remove_hcd(musb_to_hcd(musb));
+ /* FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+ musb->is_suspended = 0;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_active = 1;
+ if (musb->gadget_driver && musb->gadget_driver->resume) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->resume(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ WARNING("unhandled RESUME transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ DBG(3, "devctl %02x\n", devctl);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_suspended = 1;
+ if (musb->gadget_driver && musb->gadget_driver->suspend) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->suspend(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+ * A_PERIPHERAL may need care too
+ */
+ WARNING("unhandled SUSPEND transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+ musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+ DBG(3, "devctl %02x\n", devctl);
+
+ /* clear HR */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+ /* don't draw vbus until new b-default session */
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->g.speed = USB_SPEED_UNKNOWN;
+ if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+
+ switch (musb->xceiv.state) {
+ default:
+#ifdef CONFIG_USB_MUSB_OTG
+ DBG(2, "Unhandled disconnect %s, setting a_idle\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+ }
+
+ musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
+ u8 power;
+
+ DBG(3, "<== %s addr=%x driver '%s'\n",
+ (devctl & MUSB_DEVCTL_BDEVICE)
+ ? "B-Device" : "A-Device",
+ musb_readb(mbase, MUSB_FADDR),
+ musb->gadget_driver
+ ? musb->gadget_driver->driver.name
+ : NULL
+ );
+
+ /* report disconnect, if we didn't already (flushing EP state) */
+ if (musb->g.speed != USB_SPEED_UNKNOWN)
+ musb_g_disconnect(musb);
+
+ /* clear HR */
+ else if (devctl & MUSB_DEVCTL_HR)
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+
+ /* what speed did we negotiate? */
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ /* start in USB_STATE_DEFAULT */
+ musb->is_active = 1;
+ musb->is_suspended = 0;
+ MUSB_DEV_MODE(musb);
+ musb->address = 0;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+ musb->may_wakeup = 0;
+ musb->g.b_hnp_enable = 0;
+ musb->g.a_alt_hnp_support = 0;
+ musb->g.a_hnp_support = 0;
+
+ /* Normal reset, as B-Device;
+ * or else after HNP, as A-Device
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (is_otg_enabled(musb)) {
+ musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ } else
+ WARN_ON(1);
+
+ /* start with default limits on VBUS power draw */
+ (void) musb_gadget_vbus_draw(&musb->g,
+ is_otg_enabled(musb) ? 8 : 100);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 0000000..59502da
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+ struct usb_request request;
+ struct musb_ep *ep;
+ struct musb *musb;
+ u8 tx; /* endpoint direction */
+ u8 epnum;
+ u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+ /* stuff towards the head is basically write-once. */
+ struct usb_ep end_point;
+ char name[12];
+ struct musb_hw_ep *hw_ep;
+ struct musb *musb;
+ u8 current_epnum;
+
+ /* ... when enabled/disabled ... */
+ u8 type;
+ u8 is_in;
+ u16 packet_sz;
+ const struct usb_endpoint_descriptor *desc;
+ struct dma_channel *dma;
+
+ /* later things are modified based on usage */
+ struct list_head req_list;
+
+ /* true if lock must be dropped but req_list may not be advanced */
+ u8 busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+ struct list_head *queue = &ep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct usb_request, list);
+}
+
+extern void musb_g_tx(struct musb *musb, u8 epnum);
+extern void musb_g_rx(struct musb *musb, u8 epnum);
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 0000000..a57652f
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,983 @@
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note: we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers. And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
+
+static char *decode_ep0stage(u8 stage)
+{
+ switch (stage) {
+ case MUSB_EP0_STAGE_SETUP: return "idle";
+ case MUSB_EP0_STAGE_TX: return "in";
+ case MUSB_EP0_STAGE_RX: return "out";
+ case MUSB_EP0_STAGE_ACKWAIT: return "wait";
+ case MUSB_EP0_STAGE_STATUSIN: return "in/status";
+ case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
+ default: return "?";
+ }
+}
+
+/* handle a standard GET_STATUS request
+ * Context: caller holds controller lock
+ */
+static int service_tx_status_request(
+ struct musb *musb,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ void __iomem *mbase = musb->mregs;
+ int handled = 1;
+ u8 result[2], epnum = 0;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ result[1] = 0;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ result[0] |= musb->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+#endif
+ break;
+
+ case USB_RECIP_INTERFACE:
+ result[0] = 0;
+ break;
+
+ case USB_RECIP_ENDPOINT: {
+ int is_in;
+ struct musb_ep *ep;
+ u16 tmp;
+ void __iomem *regs;
+
+ epnum = (u8) ctrlrequest->wIndex;
+ if (!epnum) {
+ result[0] = 0;
+ break;
+ }
+
+ is_in = epnum & USB_DIR_IN;
+ if (is_in) {
+ epnum &= 0x0f;
+ ep = &musb->endpoints[epnum].ep_in;
+ } else {
+ ep = &musb->endpoints[epnum].ep_out;
+ }
+ regs = musb->endpoints[epnum].regs;
+
+ if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+
+ musb_ep_select(mbase, epnum);
+ if (is_in)
+ tmp = musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MUSB_RXCSR)
+ & MUSB_RXCSR_P_SENDSTALL;
+ musb_ep_select(mbase, 0);
+
+ result[0] = tmp ? 1 : 0;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ /* fill up the fifo; caller updates csr0 */
+ if (handled > 0) {
+ u16 len = le16_to_cpu(ctrlrequest->wLength);
+
+ if (len > 2)
+ len = 2;
+ musb_write_fifo(&musb->endpoints[0], len, result);
+ }
+
+ return handled;
+}
+
+/*
+ * Handle a control-IN request: the end0 buffer contains the current
+ * request, which is supposed to be a standard control request. Assumes
+ * the fifo is at least 2 bytes long.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+ int handled = 0; /* not handled */
+
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_GET_STATUS:
+ handled = service_tx_status_request(musb,
+ ctrlrequest);
+ break;
+
+ /* case USB_REQ_SYNC_FRAME: */
+
+ default:
+ break;
+ }
+ }
+ return handled;
+}
+
+/*
+ * Context: caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+ musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl;
+
+ DBG(1, "HNP: Setting HR\n");
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+ musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ * always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ * always handled here, except for class/vendor/... features
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+ struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int handled = -EINVAL;
+ void __iomem *mbase = musb->mregs;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ /* the gadget driver handles everything except what we MUST handle */
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ musb->set_address = true;
+ musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+ handled = 1;
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ if (ctrlrequest->wValue
+ != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+ musb->may_wakeup = 0;
+ handled = 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:{
+ const u8 num = ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+
+ if (num == 0
+ || num >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ if (ctrlrequest->wIndex & USB_DIR_IN)
+ musb_ep = &musb->endpoints[num].ep_in;
+ else
+ musb_ep = &musb->endpoints[num].ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ /* REVISIT do it directly, no locking games */
+ spin_unlock(&musb->lock);
+ musb_gadget_set_halt(&musb_ep->end_point, 0);
+ spin_lock(&musb->lock);
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ handled = 1;
+ switch (ctrlrequest->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ musb->may_wakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (musb->g.speed != USB_SPEED_HIGH)
+ goto stall;
+ if (ctrlrequest->wIndex & 0xff)
+ goto stall;
+
+ switch (ctrlrequest->wIndex >> 8) {
+ case 1:
+ pr_debug("TEST_J\n");
+ /* TEST_J */
+ musb->test_mode_nr =
+ MUSB_TEST_J;
+ break;
+ case 2:
+ /* TEST_K */
+ pr_debug("TEST_K\n");
+ musb->test_mode_nr =
+ MUSB_TEST_K;
+ break;
+ case 3:
+ /* TEST_SE0_NAK */
+ pr_debug("TEST_SE0_NAK\n");
+ musb->test_mode_nr =
+ MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ /* TEST_PACKET */
+ pr_debug("TEST_PACKET\n");
+ musb->test_mode_nr =
+ MUSB_TEST_PACKET;
+ break;
+ default:
+ goto stall;
+ }
+
+ /* enter test mode after irq */
+ if (handled > 0)
+ musb->test_mode = true;
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.b_hnp_enable = 1;
+ musb_try_b_hnp_enable(musb);
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_alt_hnp_support = 1;
+ break;
+#endif
+stall:
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_ENDPOINT:{
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (epnum == 0
+ || epnum >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
+ else
+ musb_ep = &ep->ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_CLRDATATOG
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_TXCSR,
+ csr);
+ } else {
+ csr = musb_readw(regs,
+ MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG
+ | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR,
+ csr);
+ }
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+ } else
+ handled = 0;
+ return handled;
+}
+
+/* we have an ep0out data packet
+ * Context: caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *req;
+ u16 tmp;
+
+ req = next_ep0_request(musb);
+
+ /* read packet and ack; or stall because of gadget driver bug:
+ * should have provided the rx buffer before setup() returned.
+ */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned len = req->length - req->actual;
+
+ /* read the buffer */
+ tmp = musb_readb(regs, MUSB_COUNT0);
+ if (tmp > len) {
+ req->status = -EOVERFLOW;
+ tmp = len;
+ }
+ musb_read_fifo(&musb->endpoints[0], tmp, buf);
+ req->actual += tmp;
+ /* a short packet (under 64 bytes) or a full buffer ends the data stage */
+ if (tmp < 64 || req->actual == req->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_DATAEND;
+ } else {
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY;
+ req = NULL;
+ }
+ } else
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+ /* Completion handler may choose to stall, e.g. because the
+ * message just received holds invalid data.
+ */
+ if (req) {
+ musb->ackpend = tmp;
+ musb_g_ep0_giveback(musb, req);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, tmp);
+}
+
+/*
+ * transmitting to the host (IN), this code might be called from IRQ
+ * and from kernel thread.
+ *
+ * Context: caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *request = next_ep0_request(musb);
+ u16 csr = MUSB_CSR0_TXPKTRDY;
+ u8 *fifo_src;
+ u8 fifo_count;
+
+ if (!request) {
+ /* WARN_ON(1); */
+ DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ return;
+ }
+
+ /* load the data */
+ fifo_src = (u8 *) request->buf + request->actual;
+ fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+ request->length - request->actual);
+ musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+ request->actual += fifo_count;
+
+ /* update the flags */
+ if (fifo_count < MUSB_MAX_END0_PACKET
+ || request->actual == request->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ csr |= MUSB_CSR0_P_DATAEND;
+ } else
+ request = NULL;
+
+ /* report completions as soon as the fifo's loaded; there's no
+ * win in waiting till this last packet gets acked. (other than
+ * very precise fault reporting, needed by USB TMC; possible with
+ * this hardware, but not usable from portable gadget drivers.)
+ */
+ if (request) {
+ musb->ackpend = csr;
+ musb_g_ep0_giveback(musb, request);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context: caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+ struct usb_request *r;
+ void __iomem *regs = musb->control_ep->regs;
+
+ musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+ */
+ DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ req->bRequestType,
+ req->bRequest,
+ le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex),
+ le16_to_cpu(req->wLength));
+
+ /* clean up any leftover transfers */
+ r = next_ep0_request(musb);
+ if (r)
+ musb_g_ep0_giveback(musb, r);
+
+ /* For zero-data requests we want to delay the STATUS stage to
+ * avoid SETUPEND errors. If we read data (OUT), delay accepting
+ * packets until there's a buffer to store them in.
+ *
+ * If we write data, the controller acts happier if we enable
+ * the TX FIFO right away, and give the controller a moment
+ * to switch modes...
+ */
+ musb->set_address = false;
+ musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (req->wLength == 0) {
+ if (req->bRequestType & USB_DIR_IN)
+ musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+ musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+ } else if (req->bRequestType & USB_DIR_IN) {
+ musb->ep0_state = MUSB_EP0_STAGE_TX;
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+ while ((musb_readw(regs, MUSB_CSR0)
+ & MUSB_CSR0_RXPKTRDY) != 0)
+ cpu_relax();
+ musb->ackpend = 0;
+ } else
+ musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
+
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int retval;
+ if (!musb->gadget_driver)
+ return -EOPNOTSUPP;
+ spin_unlock(&musb->lock);
+ retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ spin_lock(&musb->lock);
+ return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ u16 csr;
+ u16 len;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *regs = musb->endpoints[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+ musb_ep_select(mbase, 0); /* select ep0 */
+ csr = musb_readw(regs, MUSB_CSR0);
+ len = musb_readb(regs, MUSB_COUNT0);
+
+ DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+ csr, len,
+ musb_readb(mbase, MUSB_FADDR),
+ decode_ep0stage(musb->ep0_state));
+
+ /* I sent a stall.. need to acknowledge it now.. */
+ if (csr & MUSB_CSR0_P_SENTSTALL) {
+ musb_writew(regs, MUSB_CSR0,
+ csr & ~MUSB_CSR0_P_SENTSTALL);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ }
+
+ /* request ended "early" */
+ if (csr & MUSB_CSR0_P_SETUPEND) {
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* NOTE: request may need completion */
+ }
+
+ /* docs from Mentor only describe tx, rx, and idle/setup states.
+ * we need to handle nuances around status stages, and also the
+ * case where status and setup stages come back-to-back ...
+ */
+ switch (musb->ep0_state) {
+
+ case MUSB_EP0_STAGE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+ ep0_txstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ ep0_rxstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_STATUSIN:
+ /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+ /* update address (if needed) only @ the end of the
+ * status phase per usb spec, which also guarantees
+ * we get 10 msec to receive this irq... until this
+ * is done we won't see the next packet.
+ */
+ if (musb->set_address) {
+ musb->set_address = false;
+ musb_writeb(mbase, MUSB_FADDR, musb->address);
+ }
+
+ /* enter test mode if needed (exit by reset) */
+ else if (musb->test_mode) {
+ DBG(1, "entering TESTMODE\n");
+
+ if (MUSB_TEST_PACKET == musb->test_mode_nr)
+ musb_load_testpacket(musb);
+
+ musb_writeb(mbase, MUSB_TESTMODE,
+ musb->test_mode_nr);
+ }
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_STATUSOUT:
+ /* end of sequence #1: write to host (TX state) */
+ {
+ struct usb_request *req;
+
+ req = next_ep0_request(musb);
+ if (req)
+ musb_g_ep0_giveback(musb, req);
+ }
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_SETUP:
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ struct usb_ctrlrequest setup;
+ int handled = 0;
+
+ if (len != 8) {
+ ERR("SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+ musb_read_setup(musb, &setup);
+ retval = IRQ_HANDLED;
+
+ /* sometimes the RESET won't be reported */
+ if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+ u8 power;
+
+ printk(KERN_NOTICE "%s: peripheral reset "
+ "irq lost!\n",
+ musb_driver_name);
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ }
+
+ switch (musb->ep0_state) {
+
+ /* sequence #3 (no data stage), includes requests
+ * we can't forward (notably SET_ADDRESS and the
+ * device/endpoint feature set/clear operations)
+ * plus SET_CONFIGURATION and others that we must forward
+ */
+ case MUSB_EP0_STAGE_ACKWAIT:
+ handled = service_zero_data_request(
+ musb, &setup);
+
+ /* status stage might be immediate */
+ if (handled > 0) {
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSIN;
+ }
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
+ * requests that we can't forward, GET_DESCRIPTOR
+ * and others that we must forward
+ */
+ case MUSB_EP0_STAGE_TX:
+ handled = service_in_request(musb, &setup);
+ if (handled > 0) {
+ musb->ackpend = MUSB_CSR0_TXPKTRDY
+ | MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSOUT;
+ }
+ break;
+
+ /* sequence #2 (OUT from host), always forward */
+ default: /* MUSB_EP0_STAGE_RX */
+ break;
+ }
+
+ DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+ handled, csr,
+ decode_ep0stage(musb->ep0_state));
+
+ /* unless we need to delegate this to the gadget
+ * driver, we know how to wrap this up: csr0 has
+ * not yet been written.
+ */
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(musb, &setup);
+ if (handled < 0) {
+ musb_ep_select(mbase, 0);
+stall:
+ DBG(3, "stall (%d)\n", handled);
+ musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+finish:
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend);
+ musb->ackpend = 0;
+ }
+ }
+ break;
+
+ case MUSB_EP0_STAGE_ACKWAIT:
+ /* This should not happen, but it does with tusb6010 running
+ * g_file_storage at high speed. Do nothing.
+ */
+ retval = IRQ_HANDLED;
+ break;
+
+ default:
+ /* "can't happen" */
+ WARN_ON(1);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ break;
+ }
+
+ return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
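+/*
+ * Queue a request on ep0. Only valid while a control transfer is in
+ * progress (data stage, or the zero-length ACKWAIT stage), and only
+ * one request may be pending at a time.
+ */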
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+ struct musb_ep *ep;
+ struct musb_request *req;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+ void __iomem *regs;
+
+ if (!e || !r)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ regs = musb->control_ep->regs;
+
+ req = to_musb_request(r);
+ req->musb = musb;
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->tx = ep->is_in;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
+ status = 0;
+ break;
+ default:
+ DBG(1, "ep0 request queued in state %d\n",
+ musb->ep0_state);
+ status = -EINVAL;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(req->request.list), &(ep->req_list));
+
+ DBG(3, "queue to %s (%s), length=%d\n",
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+ musb_ep_select(musb->mregs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+ ep0_txstate(musb);
+
+ /* sequence #3, no-data ... issue IN status */
+ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+ if (req->request.length)
+ status = -EINVAL;
+ else {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend | MUSB_CSR0_P_DATAEND);
+ musb->ackpend = 0;
+ musb_g_ep0_giveback(ep->musb, r);
+ }
+
+ /* else for sequence #2 (OUT), caller provides a buffer
+ * before the next packet arrives. deferred responses
+ * (after SETUP is acked) are racey.
+ */
+ } else if (musb->ackpend) {
+ musb_writew(regs, MUSB_CSR0, musb->ackpend);
+ musb->ackpend = 0;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
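+/*
+ * Protocol-stall ep0. Only setting the halt is supported (value must
+ * be nonzero), and only while a control transfer is in progress;
+ * fails with -EBUSY if a request is still queued.
+ */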
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+ struct musb_ep *ep;
+ struct musb *musb;
+ void __iomem *base, *regs;
+ unsigned long flags;
+ int status;
+ u16 csr;
+
+ if (!e || !value)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ base = musb->mregs;
+ regs = musb->control_ep->regs;
+ status = 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ musb_ep_select(base, 0);
+ csr = musb->ackpend;
+
+ switch (musb->ep0_state) {
+
+ /* Stalls are usually issued after parsing SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* FALLTHROUGH */
+
+ /* It's also OK to issue stalls during callbacks when a non-empty
+ * DATA stage buffer has been read (or even written).
+ */
+ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
+ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */
+
+ csr |= MUSB_CSR0_P_SENDSTALL;
+ musb_writew(regs, MUSB_CSR0, csr);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ackpend = 0;
+ break;
+ default:
+ DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+ status = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+ .enable = musb_g_ep0_enable,
+ .disable = musb_g_ep0_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_g_ep0_queue,
+ .dequeue = musb_g_ep0_dequeue,
+ .set_halt = musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 0000000..8b4be01
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to make NAKing for bulk or control
+ * transfers unable to starve other requests; or to make efficient use
+ * of hardware with periodic transfers. (Note that network drivers
+ * commonly post bulk reads that stay pending for a long time; these
+ * would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 1000;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (retries-- < 1) {
+ ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+ return;
+ }
+ mdelay(1);
+ }
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ if (ep->epnum) {
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ } else {
+ txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+ musb_writew(ep->regs, MUSB_CSR0, txcsr);
+ }
+}
+
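+/* Enable DMA for host TX on an endpoint whose CSRs are already
+ * programmed; used by the CPPI and TUSB OMAP DMA paths.
+ */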
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue;
+ * the hardware endpoint must already be claimed by the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u16 frame;
+ u32 len;
+ void *buf;
+ void __iomem *mbase = musb->mregs;
+ struct urb *urb = next_urb(qh);
+ struct musb_hw_ep *hw_ep = qh->hw_ep;
+ unsigned pipe = urb->pipe;
+ u8 address = usb_pipedevice(pipe);
+ int epnum = hw_ep->epnum;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ hw_ep->out_qh = qh;
+ musb->ep0_stage = MUSB_EP0_START;
+ buf = urb->setup_packet;
+ len = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ qh, urb, address, qh->epnum,
+ is_in ? "in" : "out",
+ ({char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+ case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
+ default: s = "-intr"; break;
+ }; s; }),
+ epnum, buf, len);
+
+ /* Configure endpoint */
+ if (is_in || hw_ep->is_shared_fifo)
+ hw_ep->in_qh = qh;
+ else
+ hw_ep->out_qh = qh;
+ musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ DBG(3, "check whether there's still time for periodic Tx\n");
+ qh->iso_idx = 0;
+ frame = musb_readw(mbase, MUSB_FRAME);
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if ((urb->transfer_flags & URB_ISO_ASAP)
+ || (frame >= urb->start_frame)) {
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code; and we don't init urb->start_frame...
+ */
+ qh->frame = 0;
+ goto start;
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+ DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+start:
+ DBG(4, "Start TX%d %s\n", epnum,
+ hw_ep->tx_channel ? "dma" : "pio");
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+ else if (is_cppi_enabled() || tusb_dma_omap())
+ cppi_host_txdma_start(hw_ep);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
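+ /* log level depends on how the URB completed (GCC statement expression) */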
+ DBG(({ int level; switch (urb->status) {
+ case 0:
+ level = 4;
+ break;
+ /* common/boring faults */
+ case -EREMOTEIO:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ case -EPIPE:
+ level = 3;
+ break;
+ default:
+ level = 2;
+ break;
+ }; level; }),
+ "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->status,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
+ spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u16 csr;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *qh;
+
+ /* FIXME: the current Mentor DMA code seems to have
+ * problems getting toggle correct.
+ */
+
+ if (is_in || ep->is_shared_fifo)
+ qh = ep->in_qh;
+ else
+ qh = ep->out_qh;
+
+ if (!is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ usb_settoggle(udev, qh->epnum, 1,
+ (csr & MUSB_TXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ usb_settoggle(udev, qh->epnum, 0,
+ (csr & MUSB_RXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+ int is_in;
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ int ready = qh->is_ready;
+
+ if (ep->is_shared_fifo)
+ is_in = 1;
+ else
+ is_in = usb_pipein(urb->pipe);
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ musb_save_toggle(ep, is_in, urb);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, status);
+ qh->is_ready = ready;
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+
+ if (is_in)
+ ep->rx_reinit = 1;
+ else
+ ep->tx_reinit = 1;
+
+ /* clobber old pointers to this qh */
+ if (is_in || ep->is_shared_fifo)
+ ep->in_qh = NULL;
+ else
+ ep->out_qh = NULL;
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ musb->periodic[ep->epnum] = NULL;
+ kfree(qh);
+ qh = NULL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ }
+ return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
+{
+ struct musb_qh *qh;
+
+ if (is_in || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ if (urb->status == -EINPROGRESS)
+ qh = musb_giveback(qh, urb, 0);
+ else
+ qh = musb_giveback(qh, urb, urb->status);
+
+ if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ DBG(4, "... next ep%d %cX urb %p\n",
+ hw_ep->epnum, is_in ? 'R' : 'T',
+ next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+ csr &= ~(MUSB_RXCSR_H_REQPKT
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
+ u32 length;
+ int do_flush = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+ /* musb_ep_select(mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+ urb->transfer_buffer_length);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(pipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (iso_err) {
+ status = -EILSEQ;
+ urb->error_count++;
+ }
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ buf = buffer + d->offset;
+ length = d->length;
+ if (rx_count > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ done = (++qh->iso_idx >= urb->number_of_packets);
+ } else {
+ /* non-isoch */
+ buf = buffer + qh->offset;
+ length = urb->transfer_buffer_length - qh->offset;
+ if (rx_count > length) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = -EOVERFLOW;
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ done = (urb->actual_length == urb->transfer_buffer_length)
+ || (rx_count < qh->maxpacket)
+ || (urb->status != -EINPROGRESS);
+ if (done
+ && (urb->status == -EINPROGRESS)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length
+ < urb->transfer_buffer_length))
+ urb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(hw_ep, csr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+ if (!done)
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->is_shared_fifo) {
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_MODE) {
+ musb_h_tx_flush_fifo(ep);
+ musb_writew(ep->regs, MUSB_TXCSR,
+ MUSB_TXCSR_FRCDATATOG);
+ }
+ /* clear mode (and everything else) to enable Rx */
+ musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ } else {
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+ qh->addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+ qh->h_addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+ qh->h_port_reg);
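+/*
+ * Hand the SETUP request to the gadget driver's setup() callback,
+ * dropping the controller lock around the call.
+ */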
+ } else
+ musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+ ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len)
+{
+ struct dma_controller *dma_controller;
+ struct dma_channel *dma_channel;
+ u8 dma_ok;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh;
+ u16 packet_sz;
+
+ if (!is_out || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ packet_sz = qh->maxpacket;
+
+ DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d\n",
+ is_out ? "-->" : "<--",
+ epnum, urb, urb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+ musb_ep_select(mbase, epnum);
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+ if (is_dma_capable() && epnum && dma_controller) {
+ dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+ if (!dma_channel) {
+ dma_channel = dma_controller->channel_alloc(
+ dma_controller, hw_ep, is_out);
+ if (is_out)
+ hw_ep->tx_channel = dma_channel;
+ else
+ hw_ep->rx_channel = dma_channel;
+ }
+ } else
+ dma_channel = NULL;
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 csr;
+ u16 int_txe;
+ u16 load_count;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* disable interrupt in case we flush */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ /* general endpoint setup */
+ if (epnum) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* flush all old state, set default */
+ musb_h_tx_flush_fifo(hw_ep);
+ csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_FRCDATATOG
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY
+ );
+ csr |= MUSB_TXCSR_MODE;
+
+ if (usb_gettoggle(urb->dev,
+ qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+
+ /* twice in case of double packet buffering */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ /* endpoint 0: just flush */
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+ qh->addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+ qh->h_addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+ qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+ } else
+ musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (epnum) {
+ musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+ if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
+ else
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz);
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+ if (musb->is_multipoint)
+ musb_writeb(epio, MUSB_TYPE0,
+ qh->type_reg);
+ }
+
+ if (can_bulk_split(musb, qh->type))
+ load_count = min((u32) hw_ep->max_packet_sz_tx,
+ len);
+ else
+ load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma_channel) {
+
+ /* clear previous state */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ qh->segsize = min(len, dma_channel->max_len);
+
+ if (qh->segsize <= packet_sz)
+ dma_channel->desired_mode = 0;
+ else
+ dma_channel->desired_mode = 1;
+
+ if (dma_channel->desired_mode == 0) {
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE);
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ dma_channel->desired_mode,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ if (is_out)
+ hw_ep->tx_channel = NULL;
+ else
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ }
+ }
+#endif
+
+ /* candidate for DMA */
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+ /* program endpoint CSRs first, then setup DMA.
+ * assume CPPI setup succeeds.
+ * defer enabling dma.
+ */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* TX uses "rndis" mode automatically, but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ (urb->transfer_flags
+ & URB_ZERO_PACKET)
+ == URB_ZERO_PACKET,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->tx_channel = NULL;
+ dma_channel = NULL;
+
+ /* REVISIT there's an error path here that
+ * needs handling: can't do dma, but
+ * there's no pio buffer address...
+ */
+ }
+ }
+
+ if (load_count) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+ musb_write_fifo(hw_ep, load_count, buf);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_AUTOSET);
+ /* write CSR */
+ csr |= MUSB_TXCSR_MODE;
+
+ if (epnum)
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* IN/receive */
+ } else {
+ u16 csr;
+
+ if (hw_ep->rx_reinit) {
+ musb_rx_reinit(musb, qh, hw_ep);
+
+ /* init new state: toggle and NYET, maybe DMA later */
+ if (usb_gettoggle(urb->dev, qh->epnum, 0))
+ csr = MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE;
+ else
+ csr = 0;
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ if (csr & (MUSB_RXCSR_RXPKTRDY
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MUSB_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ /* candidate for DMA */
+ if (dma_channel) {
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs,
+ MUSB_RXCSR);
+
+ /* unless caller treats short rx transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ !(urb->transfer_flags
+ & URB_SHORT_NOT_OK),
+ urb->transfer_dma,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(
+ dma_channel);
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
+ }
+ }
+
+ csr |= MUSB_RXCSR_H_REQPKT;
+ DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ }
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ struct musb_qh *qh = hw_ep->in_qh;
+ struct usb_ctrlrequest *request;
+
+ switch (musb->ep0_stage) {
+ case MUSB_EP0_IN:
+ fifo_dest = urb->transfer_buffer + urb->actual_length;
+ fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+ - urb->actual_length)));
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+ musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ } else if (urb->actual_length <
+ urb->transfer_buffer_length)
+ more = true;
+ break;
+ case MUSB_EP0_START:
+ request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!request->wLength) {
+ DBG(4, "start no-DATA\n");
+ break;
+ } else if (request->bRequestType & USB_DIR_IN) {
+ DBG(4, "start IN-DATA\n");
+ musb->ep0_stage = MUSB_EP0_IN;
+ more = true;
+ break;
+ } else {
+ DBG(4, "start OUT-DATA\n");
+ musb->ep0_stage = MUSB_EP0_OUT;
+ more = true;
+ }
+ /* FALLTHROUGH */
+ case MUSB_EP0_OUT:
+ fifo_count = min(qh->maxpacket, ((u16)
+ (urb->transfer_buffer_length
+ - urb->actual_length)));
+
+ if (fifo_count) {
+ fifo_dest = (u8 *) (urb->transfer_buffer
+ + urb->actual_length);
+ DBG(3, "Sending %d bytes to %p\n",
+ fifo_count, fifo_dest);
+ musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+ }
+ break;
+ default:
+ ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+ break;
+ }
+
+ return more;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ
+ * context, from the LinuxIsr() interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ struct urb *urb;
+ u16 csr, len;
+ int status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ bool complete = false;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+ : 0;
+
+ DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ csr, qh, len, urb, musb->ep0_stage);
+
+ /* if we just did status stage, we are done */
+ if (MUSB_EP0_STATUS == musb->ep0_stage) {
+ retval = IRQ_HANDLED;
+ complete = true;
+ }
+
+ /* prepare status */
+ if (csr & MUSB_CSR0_H_RXSTALL) {
+ DBG(6, "STALLING ENDPOINT\n");
+ status = -EPIPE;
+
+ } else if (csr & MUSB_CSR0_H_ERROR) {
+ DBG(2, "no response, csr0 %04x\n", csr);
+ status = -EPROTO;
+
+ } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+ DBG(2, "control NAK timeout\n");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_writew(epio, MUSB_CSR0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ DBG(6, "aborting\n");
+ retval = IRQ_HANDLED;
+ if (urb)
+ urb->status = status;
+ complete = true;
+
+ /* use the proper sequence to abort the transfer */
+ if (csr & MUSB_CSR0_H_REQPKT) {
+ csr &= ~MUSB_CSR0_H_REQPKT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ } else {
+ csr |= MUSB_CSR0_FLUSHFIFO;
+ musb_writew(epio, MUSB_CSR0, csr);
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ }
+
+ musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+ /* clear it */
+ musb_writew(epio, MUSB_CSR0, 0);
+ }
+
+ if (unlikely(!urb)) {
+ /* stop endpoint since we have no place for its data, this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, 0);
+
+ goto done;
+ }
+
+ if (!complete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(musb, len, urb)) {
+ /* more packets required */
+ csr = (MUSB_EP0_IN == musb->ep0_stage)
+ ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ if (usb_pipeout(urb->pipe)
+ || !urb->transfer_buffer_length)
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_H_REQPKT;
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+
+ DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+ }
+ musb_writew(epio, MUSB_CSR0, csr);
+ retval = IRQ_HANDLED;
+ } else
+ musb->ep0_stage = MUSB_EP0_IDLE;
+
+ /* call completion handler if done */
+ if (complete)
+ musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+ - ... which starts DMA to fifo in mode 1 or 0
+
+ DMA Isr (transfer complete) -> TxAvail()
+ - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ only in musb_cleanup_urb)
+ - TxPktRdy has to be set in mode 0 or for
+ short packets in mode 1.
+*/
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+ int pipe;
+ bool done = false;
+ u16 tx_csr;
+ size_t wLength = 0;
+ u8 *buf = NULL;
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->out_qh;
+ u32 status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct dma_channel *dma;
+
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+ if (!urb) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+ }
+
+ pipe = urb->pipe;
+ dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+ DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ dma ? ", dma" : "");
+
+ /* check for errors */
+ if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+ /* dma was disabled, fifo flushed */
+ DBG(3, "TX end %d stall\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+ /* (NON-ISO) dma was disabled, fifo flushed */
+ DBG(3, "TX 3strikes on ep=%d\n", epnum);
+
+ status = -ETIMEDOUT;
+
+ } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+ DBG(6, "TX end=%d device not responding\n", epnum);
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ goto finish;
+ }
+
+ if (status) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ }
+
+ /* do the proper sequence to abort the transfer in the
+ * usb core; the dma engine should already be stopped.
+ */
+ musb_h_tx_flush_fifo(hw_ep);
+ tx_csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ );
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+ done = true;
+ }
+
+ /* second cppi case */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+ }
+
+ /* REVISIT this looks wrong... */
+ if (!status || dma || usb_pipeisoc(pipe)) {
+ if (dma)
+ wLength = dma->actual_len;
+ else
+ wLength = qh->segsize;
+ qh->offset += wLength;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = qh->segsize;
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ d++;
+ buf = urb->transfer_buffer + d->offset;
+ wLength = d->length;
+ }
+ } else if (dma) {
+ done = true;
+ } else {
+ /* see if we need to send more data, or ZLP */
+ if (qh->segsize < qh->maxpacket)
+ done = true;
+ else if (qh->offset == urb->transfer_buffer_length
+ && !(urb->transfer_flags
+ & URB_ZERO_PACKET))
+ done = true;
+ if (!done) {
+ buf = urb->transfer_buffer
+ + qh->offset;
+ wLength = urb->transfer_buffer_length
+ - qh->offset;
+ }
+ }
+ }
+
+ /* urb->status != -EINPROGRESS means request has been faulted,
+ * so we must abort this transfer after cleanup
+ */
+ if (urb->status != -EINPROGRESS) {
+ done = true;
+ if (status == 0)
+ status = urb->status;
+ }
+
+ if (done) {
+ /* set status */
+ urb->status = status;
+ urb->actual_length = qh->offset;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+
+ } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
+ /* WARN_ON(!buf); */
+
+ /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
+ * (and presumably, fifo is not half-full) we should write TWO
+ * packets before updating TXCSR ... other docs disagree ...
+ */
+ /* PIO: start next packet in this URB */
+ wLength = min(qh->maxpacket, (u16) wLength);
+ musb_write_fifo(hw_ep, wLength, buf);
+ qh->segsize = wLength;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+ } else
+ DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+ return;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+ - first IN token is sent out (by setting ReqPkt)
+ LinuxIsr -> RxReady()
+ /\ => first packet is received
+ | - Set in mode 0 (DmaEnab, ~ReqPkt)
+ | -> DMA Isr (transfer complete) -> RxReady()
+ | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ | - if urb not complete, send next IN token (ReqPkt)
+ | | else complete urb.
+ | |
+ ---------------------------
+ *
+ * Nuances of mode 1:
+ * For short packets, no ack (+RxPktRdy) is sent automatically
+ * (even if AutoClear is ON)
+ * For full packets, the ack (~RxPktRdy) and the next IN token (+ReqPkt) are
+ * sent automatically => major problem, as collecting the next packet becomes
+ * difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ * All we care about at this driver level is that
+ * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ * (b) termination conditions are: short RX, or buffer full;
+ * (c) fault modes include
+ * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ * (and that endpoint's dma queue stops immediately)
+ * - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ * thus be a great candidate for using mode 1 ... for all but the
+ * last packet of one URB's transfer.
+ */
+
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ size_t xfer_len;
+ void __iomem *mbase = musb->mregs;
+ int pipe;
+ u16 rx_csr, val;
+ bool iso_err = false;
+ bool done = false;
+ u32 status;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ urb = next_urb(qh);
+ dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+ status = 0;
+ xfer_len = 0;
+
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ val = rx_csr;
+
+ if (unlikely(!urb)) {
+ /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+ * usbtest #11 (unlinks) triggers it regularly, sometimes
+ * with fifo full. (Only with DMA??)
+ */
+ DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+ musb_readw(epio, MUSB_RXCOUNT));
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ return;
+ }
+
+ pipe = urb->pipe;
+
+ DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+ epnum, rx_csr, urb->actual_length,
+ dma ? dma->actual_len : 0);
+
+ /* check for errors, concurrent stall & unlink is not really
+ * handled yet! */
+ if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+ DBG(3, "RX end %d STALL\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+ DBG(3, "end %d RX proto error\n", epnum);
+
+ status = -EPROTO;
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+ } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+ /* NOTE this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) rx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->in_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ DBG(6, "RX end %d NAK timeout\n", epnum);
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS
+ | MUSB_RXCSR_H_REQPKT);
+
+ goto finish;
+ } else {
+ DBG(4, "RX end %d ISO data error\n", epnum);
+ /* packet error reported later */
+ iso_err = true;
+ }
+ }
+
+ /* faults abort the transfer */
+ if (status) {
+ /* clean up dma and collect transfer count */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ }
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ done = true;
+ goto finish;
+ }
+
+ if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+ /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+ ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+ goto finish;
+ }
+
+ /* thorough shutdown for now ... given more precise fault handling
+ * and better queueing support, we might keep a DMA pipeline going
+ * while processing this irq for earlier completions.
+ */
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+ if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+ * shouldn't this be the "half full" double buffer case?
+ */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ done = true;
+ }
+
+ DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ xfer_len, dma ? ", dma" : "");
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | rx_csr);
+ }
+#endif
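+ /* DMA was in use on this endpoint: collect its transfer count
+ * and clear the DMA-related CSR bits before deciding what's next.
+ */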
+ if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+ xfer_len = dma->actual_len;
+
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_RXPKTRDY);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ /* done if urb buffer is full or short packet is recd */
+ done = (urb->actual_length + xfer_len >=
+ urb->transfer_buffer_length
+ || dma->actual_len < qh->maxpacket);
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!done) {
+ val |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+ DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+#else
+ done = true;
+#endif
+ } else if (urb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+ status = -EPROTO;
+ ERR("Rx interrupt with no errors or packet!\n");
+
+ /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+ musb_ep_select(mbase, epnum);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, val);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma) {
+ struct dma_controller *c;
+ u16 rx_count;
+ int ret;
+
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+
+ DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ epnum, rx_count,
+ urb->transfer_dma
+ + urb->actual_length,
+ qh->offset,
+ urb->transfer_buffer_length);
+
+ c = musb->dma_controller;
+
+ dma->desired_mode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((urb->transfer_flags &
+ URB_SHORT_NOT_OK)
+ && (urb->transfer_buffer_length -
+ urb->actual_length)
+ > qh->maxpacket)
+ dma->desired_mode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ * to use the extra IN token to grab the last packet using mode 0, then
+ * the problem is that you cannot be sure when the device will send the
+ * last packet and set RxPktRdy. Sometimes the packet is received too
+ * soon, so that it gets lost when RxCSR is re-set at the end of the
+ * mode 1 transfer; sometimes it arrives just a little too late, so that
+ * if you try to configure for mode 0 soon after the mode 1 transfer
+ * completes, you will find rxcount 0. You might think you could simply
+ * wait for an interrupt when the packet finally arrives, but you won't
+ * get one.
+ */
+
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+
+ if (dma->desired_mode == 0)
+ val &= ~MUSB_RXCSR_H_AUTOREQ;
+ else
+ val |= MUSB_RXCSR_H_AUTOREQ;
+ val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+
+			/* REVISIT when actual_length != 0, does
+			 * transfer_buffer_length need to be
+			 * adjusted first?
+			 */
+ ret = c->channel_program(
+ dma, qh->maxpacket,
+ dma->desired_mode,
+ urb->transfer_dma
+ + urb->actual_length,
+ (dma->desired_mode == 0)
+ ? rx_count
+ : urb->transfer_buffer_length);
+
+ if (!ret) {
+ c->channel_release(dma);
+ hw_ep->rx_channel = NULL;
+ dma = NULL;
+ /* REVISIT reset CSR */
+ }
+ }
+#endif /* Mentor DMA */
+
+ if (!dma) {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ DBG(6, "read %spacket\n", done ? "last " : "");
+ }
+ }
+
+ if (dma && usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+ int iso_stat = status;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length += xfer_len;
+ if (iso_err) {
+ iso_stat = -EILSEQ;
+ urb->error_count++;
+ }
+ d->status = iso_stat;
+ }
+
+finish:
+ urb->actual_length += xfer_len;
+ qh->offset += xfer_len;
+ if (done) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+ }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+ struct musb *musb,
+ struct musb_qh *qh,
+ int is_in)
+{
+ int idle;
+ int best_diff;
+ int best_end, epnum;
+ struct musb_hw_ep *hw_ep = NULL;
+ struct list_head *head = NULL;
+
+ /* use fixed hardware for control and bulk */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ head = &musb->control;
+ hw_ep = musb->control_ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ hw_ep = musb->bulk_ep;
+ if (is_in)
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
+ break;
+ }
+ if (head) {
+ idle = list_empty(head);
+ list_add_tail(&qh->ring, head);
+ goto success;
+ }
+
+ /* else, periodic transfers get muxed to other endpoints */
+
+ /* FIXME this doesn't consider direction, so it can only
+ * work for one half of the endpoint hardware, and assumes
+ * the previous cases handled all non-shared endpoints...
+ */
+
+ /* we know this qh hasn't been scheduled, so all we need to do
+ * is choose which hardware endpoint to put it on ...
+ *
+ * REVISIT what we really want here is a regular schedule tree
+ * like e.g. OHCI uses, but for now musb->periodic is just an
+ * array of the _single_ logical endpoint associated with a
+ * given physical one (identity mapping logical->physical).
+ *
+ * that simplistic approach makes TT scheduling a lot simpler;
+ * there is none, and thus none of its complexity...
+ */
+ best_diff = 4096;
+ best_end = -1;
+
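+	/* best-fit search over the unclaimed periodic endpoints: pick
+	 * the one whose FIFO leaves the least slack above this qh's
+	 * maxpacket
+	 */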
+ for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+ int diff;
+
+ if (musb->periodic[epnum])
+ continue;
+ hw_ep = &musb->endpoints[epnum];
+ if (hw_ep == musb->bulk_ep)
+ continue;
+
+ if (is_in)
+ diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+ else
+ diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+
+ if (diff > 0 && best_diff > diff) {
+ best_diff = diff;
+ best_end = epnum;
+ }
+ }
+ if (best_end < 0)
+ return -ENOSPC;
+
+ idle = 1;
+ hw_ep = musb->endpoints + best_end;
+ musb->periodic[best_end] = qh;
+ DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+success:
+ qh->hw_ep = hw_ep;
+ qh->hep->hcpriv = qh;
+ if (idle)
+ musb_start_urb(musb, is_in, qh);
+ return 0;
+}
+
+static int musb_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct musb_qh *qh = hep->hcpriv;
+ struct usb_endpoint_descriptor *epd = &hep->desc;
+ int ret;
+ unsigned type_reg;
+ unsigned interval;
+
+ /* host role must be active */
+ if (!is_host_active(musb) || !musb->is_active)
+ return -ENODEV;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ if (ret)
+ return ret;
+
+ /* DMA mapping was already done, if needed, and this urb is on
+ * hep->urb_list ... so there's little to do unless hep wasn't
+ * yet scheduled onto a live qh.
+ *
+ * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+ * disabled, testing for empty qh->ring and avoiding qh setup costs
+ * except for the first urb queued after a config change.
+ */
+ if (qh) {
+ urb->hcpriv = qh;
+ return 0;
+ }
+
+ /* Allocate and initialize qh, minimizing the work done each time
+ * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+ *
+ * REVISIT consider a dedicated qh kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+ qh = kzalloc(sizeof *qh, mem_flags);
+ if (!qh) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ return -ENOMEM;
+ }
+
+ qh->hep = hep;
+ qh->dev = urb->dev;
+ INIT_LIST_HEAD(&qh->ring);
+ qh->is_ready = 1;
+
+ qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+ /* no high bandwidth support yet */
+ if (qh->maxpacket & ~0x7ff) {
+ ret = -EMSGSIZE;
+ goto done;
+ }
+
+ qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+ qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+ /* precompute rxtype/txtype/type0 register */
+ type_reg = (qh->type << 4) | qh->epnum;
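+	/* speed goes in bits 7:6 of {rx,tx}type: 3 = low, 2 = full,
+	 * 1 = high (see MUSB_TYPE_SPEED in musb_regs.h)
+	 */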
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ type_reg |= 0xc0;
+ break;
+ case USB_SPEED_FULL:
+ type_reg |= 0x80;
+ break;
+ default:
+ type_reg |= 0x40;
+ }
+ qh->type_reg = type_reg;
+
+ /* precompute rxinterval/txinterval register */
+ interval = min((u8)16, epd->bInterval); /* log encoding */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ /* fullspeed uses linear encoding */
+ if (USB_SPEED_FULL == urb->dev->speed) {
+ interval = epd->bInterval;
+ if (!interval)
+ interval = 1;
+ }
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_ISOC:
+ /* iso always uses log encoding */
+ break;
+ default:
+ /* REVISIT we actually want to use NAK limits, hinting to the
+ * transfer scheduling logic to try some other qh, e.g. try
+ * for 2 msec first:
+ *
+ * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+ * peripheral could make that hurt. Or for reads, one that's
+ * perfectly normal: network and other drivers keep reads
+		 * posted at all times, and having one pending for a week
+		 * should be perfectly safe.
+		 *
+		 * The upside of disabling it is that it avoids the extra
+		 * transfer scheduling code needed to set such a transfer
+		 * aside for a while.
+ */
+ interval = 0;
+ }
+ qh->intv_reg = interval;
+
+ /* precompute addressing for external hub/tt ports */
+ if (musb->is_multipoint) {
+ struct usb_device *parent = urb->dev->parent;
+
+ if (parent != hcd->self.root_hub) {
+ qh->h_addr_reg = (u8) parent->devnum;
+
+ /* set up tt info if needed */
+ if (urb->dev->tt) {
+ qh->h_port_reg = (u8) urb->dev->ttport;
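+				/* 0x80 is MUSB_HUBADDR_MULTI_TT in the
+				 * hub address register
+				 */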
+ qh->h_addr_reg |= 0x80;
+ }
+ }
+ }
+
+ /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+ * until we get real dma queues (with an entry for each urb/buffer),
+ * we only have work to do in the former case.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (hep->hcpriv) {
+ /* some concurrent activity submitted another urb to hep...
+ * odd, rare, error prone, but legal.
+ */
+ kfree(qh);
+ ret = 0;
+ } else
+ ret = musb_schedule(musb, qh,
+ epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+ if (ret == 0) {
+ urb->hcpriv = qh;
+ /* FIXME set urb->start_frame for iso/intr, it's tested in
+ * musb_start_urb(), but otherwise only konicawc cares ...
+ */
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+ if (ret != 0) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ kfree(qh);
+ }
+ return ret;
+}
+
+
+/*
+ * abort a transfer that's at the head of a hardware queue.
+ * called with controller locked, irqs blocked
+ * that hardware queue advances to the next transfer, unless prevented
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+ struct musb_hw_ep *ep = qh->hw_ep;
+ void __iomem *epio = ep->regs;
+ unsigned hw_end = ep->epnum;
+ void __iomem *regs = ep->musb->mregs;
+ u16 csr;
+ int status = 0;
+
+ musb_ep_select(regs, hw_end);
+
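+	/* abort any DMA in flight and credit whatever it already moved */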
+ if (is_dma_capable()) {
+ struct dma_channel *dma;
+
+ dma = is_in ? ep->rx_channel : ep->tx_channel;
+ if (dma) {
+ status = ep->musb->dma_controller->channel_abort(dma);
+ DBG(status ? 1 : 3,
+ "abort %cX%d DMA for urb %p --> %d\n",
+ is_in ? 'R' : 'T', ep->epnum,
+ urb, status);
+ urb->actual_length += dma->actual_len;
+ }
+ }
+
+ /* turn off DMA requests, discard state, stop polling ... */
+ if (is_in) {
+ /* giveback saves bulk toggle */
+ csr = musb_h_flush_rxfifo(ep, 0);
+
+ /* REVISIT we still get an irq; should likely clear the
+ * endpoint's irq status here to avoid bogus irqs.
+ * clearing that status is platform-specific...
+ */
+ } else {
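+		/* flush the Tx FIFO, then clear the DMA, error and ready
+		 * bits while leaving the data toggle alone
+		 */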
+ musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* flush cpu writebuffer */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ }
+ if (status == 0)
+ musb_advance_schedule(ep->musb, urb, ep, is_in);
+ return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ struct list_head *sched;
+ unsigned long flags;
+ int ret;
+
+ DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto done;
+
+ qh = urb->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* Any URB not actively programmed into endpoint hardware can be
+ * immediately given back. Such an URB must be at the head of its
+ * endpoint queue, unless someday we get real DMA queues. And even
+ * then, it might not be known to the hardware...
+ *
+ * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * has already been updated. This is a synchronous abort; it'd be
+ * OK to hold off until after some IRQ, though.
+ */
+ if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
+ ret = -EINPROGRESS;
+ else {
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_pipein(urb->pipe))
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic
+ * transfers won't always be at the head of a
+ * singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+ if (ret < 0 || (sched && qh != first_qh(sched))) {
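+		/* the urb isn't live in the hardware, so just hand it back,
+		 * with is_ready cleared across the giveback
+		 */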
+ int ready = qh->is_ready;
+
+ ret = 0;
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, 0);
+ qh->is_ready = ready;
+ } else
+ ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ u8 epnum = hep->desc.bEndpointAddress;
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ u8 is_in = epnum & USB_DIR_IN;
+ struct musb_qh *qh = hep->hcpriv;
+ struct urb *urb, *tmp;
+ struct list_head *sched;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (is_in)
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic transfers
+ * won't always be at the head of a singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+ /* kick first urb off the hardware, if needed */
+ qh->is_ready = 0;
+ if (!sched || qh == first_qh(sched)) {
+ urb = next_urb(qh);
+
+ /* make software (then hardware) stop ASAP */
+ if (!urb->unlinked)
+ urb->status = -ESHUTDOWN;
+
+ /* cleanup */
+ musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ } else
+ urb = NULL;
+
+ /* then just nuke all the others */
+ list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+ musb_giveback(qh, urb, -ESHUTDOWN);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ /* NOTE: musb_start() is called when the hub driver turns
+ * on port power, or when (OTG) peripheral starts.
+ */
+ hcd->state = HC_STATE_RUNNING;
+ musb->port1_status = 0;
+ return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+ musb_stop(hcd_to_musb(hcd));
+ hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+ return 0;
+
+ if (is_host_active(musb) && musb->is_active) {
+ WARNING("trying to suspend as %s is_active=%i\n",
+ otg_state_string(musb), musb->is_active);
+ return -EBUSY;
+ } else
+ return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+ /* resuming child port does the work */
+ return 0;
+}
+
+const struct hc_driver musb_hc_driver = {
+ .description = "musb-hcd",
+ .product_desc = "MUSB HDRC host driver",
+ .hcd_priv_size = sizeof(struct musb),
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ /* not using irq handler or reset hooks from usbcore, since
+ * those must be shared with peripheral code for OTG configs
+ */
+
+ .start = musb_h_start,
+ .stop = musb_h_stop,
+
+ .get_frame_number = musb_h_get_frame_number,
+
+ .urb_enqueue = musb_urb_enqueue,
+ .urb_dequeue = musb_urb_dequeue,
+ .endpoint_disable = musb_h_disable,
+
+ .hub_status_data = musb_hub_status_data,
+ .hub_control = musb_hub_control,
+ .bus_suspend = musb_bus_suspend,
+ .bus_resume = musb_bus_resume,
+ /* .start_port_reset = NULL, */
+ /* .hub_irq_enable = NULL, */
+};
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 0000000..77bcdb9
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+ return container_of((void *) musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return (struct musb *) (hcd->hcd_priv);
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+ struct usb_host_endpoint *hep; /* usbcore info */
+ struct usb_device *dev;
+ struct musb_hw_ep *hw_ep; /* current binding */
+
+ struct list_head ring; /* of musb_qh */
+ /* struct musb_qh *next; */ /* for periodic tree */
+
+ unsigned offset; /* in urb->transfer_buffer */
+ unsigned segsize; /* current xfer fragment */
+
+ u8 type_reg; /* {rx,tx} type register */
+ u8 intv_reg; /* {rx,tx} interval register */
+ u8 addr_reg; /* device address register */
+ u8 h_addr_reg; /* hub address register */
+ u8 h_port_reg; /* hub port register */
+
+ u8 is_ready; /* safe to modify hw_ep */
+ u8 type; /* XFERTYPE_* */
+ u8 epnum;
+ u16 maxpacket;
+ u16 frame; /* for periodic schedule */
+ unsigned iso_idx; /* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+ if (list_empty(q))
+ return NULL;
+ return list_entry(q->next, struct musb_qh, ring);
+}
+
+
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+
+extern const struct hc_driver musb_hc_driver;
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct list_head *queue;
+
+ if (!qh)
+ return NULL;
+ queue = &qh->hep->urb_list;
+ if (list_empty(queue))
+ return NULL;
+ return list_entry(queue->next, struct urb, urb_list);
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 0000000..6bbedae
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#ifndef CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+ { insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+ { insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+ { insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+ { outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+ { outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+ { outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE: these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+ { __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+ { return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { __raw_writeb(data, addr + offset); }
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 0000000..9c22866
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR 0x00 /* 8-bit */
+#define MUSB_POWER 0x01 /* 8-bit */
+
+#define MUSB_INTRTX 0x02 /* 16-bit */
+#define MUSB_INTRRX 0x04
+#define MUSB_INTRTXE 0x06
+#define MUSB_INTRRXE 0x08
+#define MUSB_INTRUSB 0x0A /* 8 bit */
+#define MUSB_INTRUSBE 0x0B /* 8 bit */
+#define MUSB_FRAME 0x0C
+#define MUSB_INDEX 0x0E /* 8 bit */
+#define MUSB_TESTMODE 0x0F /* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL 0x60 /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS 0x6C /* 8 bit */
+
+#define MUSB_EPINFO 0x78 /* 8 bit */
+#define MUSB_RAMINFO 0x79 /* 8 bit */
+#define MUSB_LINKINFO 0x7a /* 8 bit */
+#define MUSB_VPLEN 0x7b /* 8 bit */
+#define MUSB_HS_EOF1 0x7c /* 8 bit */
+#define MUSB_FS_EOF1 0x7d /* 8 bit */
+#define MUSB_LS_EOF1 0x7e /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x02
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x04
+#define MUSB_RXCSR 0x06
+#define MUSB_RXCOUNT 0x08
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x0A
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x0B
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x0C
+#define MUSB_RXINTERVAL 0x0D
+#define MUSB_FIFOSIZE 0x0F
+#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+ (0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+ (0x100 + (0x10*(_epnum)) + (_offset))
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset) \
+	(0x10 + (_offset))
+#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR 0x00
+#define MUSB_TXHUBADDR 0x02
+#define MUSB_TXHUBPORT 0x03
+
+#define MUSB_RXFUNCADDR 0x04
+#define MUSB_RXHUBADDR 0x06
+#define MUSB_RXHUBPORT 0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+ (0x80 + (8*(_epnum)) + (_offset))
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE 0x80
+#define MUSB_POWER_SOFTCONN 0x40
+#define MUSB_POWER_HSENAB 0x20
+#define MUSB_POWER_HSMODE 0x10
+#define MUSB_POWER_RESET 0x08
+#define MUSB_POWER_RESUME 0x04
+#define MUSB_POWER_SUSPENDM 0x02
+#define MUSB_POWER_ENSUSPEND 0x01
+
+/* INTRUSB */
+#define MUSB_INTR_SUSPEND 0x01
+#define MUSB_INTR_RESUME 0x02
+#define MUSB_INTR_RESET		0x04	/* Peripheral mode only */
+#define MUSB_INTR_BABBLE	0x04	/* Host mode only; shares bit 2 with RESET */
+#define MUSB_INTR_SOF 0x08
+#define MUSB_INTR_CONNECT 0x10
+#define MUSB_INTR_DISCONNECT 0x20
+#define MUSB_INTR_SESSREQ 0x40
+#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE 0x80
+#define MUSB_DEVCTL_FSDEV 0x40
+#define MUSB_DEVCTL_LSDEV 0x20
+#define MUSB_DEVCTL_VBUS 0x18
+#define MUSB_DEVCTL_VBUS_SHIFT 3
+#define MUSB_DEVCTL_HM 0x04
+#define MUSB_DEVCTL_HR 0x02
+#define MUSB_DEVCTL_SESSION 0x01
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST 0x80
+#define MUSB_TEST_FIFO_ACCESS 0x40
+#define MUSB_TEST_FORCE_FS 0x20
+#define MUSB_TEST_FORCE_HS 0x10
+#define MUSB_TEST_PACKET 0x08
+#define MUSB_TEST_K 0x04
+#define MUSB_TEST_J 0x02
+#define MUSB_TEST_SE0_NAK 0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB 0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE 0x0f
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO 0x0100
+#define MUSB_CSR0_TXPKTRDY 0x0002
+#define MUSB_CSR0_RXPKTRDY 0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND 0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
+#define MUSB_CSR0_P_SENDSTALL 0x0020
+#define MUSB_CSR0_P_SETUPEND 0x0010
+#define MUSB_CSR0_P_DATAEND 0x0008
+#define MUSB_CSR0_P_SENTSTALL 0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING 0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
+#define MUSB_CSR0_H_STATUSPKT 0x0040
+#define MUSB_CSR0_H_REQPKT 0x0020
+#define MUSB_CSR0_H_ERROR 0x0010
+#define MUSB_CSR0_H_SETUPPKT 0x0008
+#define MUSB_CSR0_H_RXSTALL 0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS \
+ (MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS \
+ (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+ | MUSB_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED 0xc0
+#define MUSB_TYPE_SPEED_SHIFT 6
+#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT 4
+#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN 0x20
+#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET 0x8000
+#define MUSB_TXCSR_MODE 0x2000
+#define MUSB_TXCSR_DMAENAB 0x1000
+#define MUSB_TXCSR_FRCDATATOG 0x0800
+#define MUSB_TXCSR_DMAMODE 0x0400
+#define MUSB_TXCSR_CLRDATATOG 0x0040
+#define MUSB_TXCSR_FLUSHFIFO 0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
+#define MUSB_TXCSR_TXPKTRDY 0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO 0x4000
+#define MUSB_TXCSR_P_INCOMPTX 0x0080
+#define MUSB_TXCSR_P_SENTSTALL 0x0020
+#define MUSB_TXCSR_P_SENDSTALL 0x0010
+#define MUSB_TXCSR_P_UNDERRUN 0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
+#define MUSB_TXCSR_H_DATATOGGLE 0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
+#define MUSB_TXCSR_H_RXSTALL 0x0020
+#define MUSB_TXCSR_H_ERROR 0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS \
+ (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+ | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS \
+ (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+ | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR 0x8000
+#define MUSB_RXCSR_DMAENAB 0x2000
+#define MUSB_RXCSR_DISNYET	0x1000	/* Peripheral mode */
+#define MUSB_RXCSR_PID_ERR	0x1000	/* Host mode; same bit as DISNYET */
+#define MUSB_RXCSR_DMAMODE 0x0800
+#define MUSB_RXCSR_INCOMPRX 0x0100
+#define MUSB_RXCSR_CLRDATATOG 0x0080
+#define MUSB_RXCSR_FLUSHFIFO 0x0010
+#define MUSB_RXCSR_DATAERROR 0x0008
+#define MUSB_RXCSR_FIFOFULL 0x0002
+#define MUSB_RXCSR_RXPKTRDY 0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO 0x4000
+#define MUSB_RXCSR_P_SENTSTALL 0x0040
+#define MUSB_RXCSR_P_SENDSTALL 0x0020
+#define MUSB_RXCSR_P_OVERRUN 0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ 0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
+#define MUSB_RXCSR_H_DATATOGGLE 0x0200
+#define MUSB_RXCSR_H_RXSTALL 0x0040
+#define MUSB_RXCSR_H_REQPKT 0x0020
+#define MUSB_RXCSR_H_ERROR 0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS \
+ (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+ | MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS \
+ (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+ | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT 0x80
+
+#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 0000000..e0e9ce5
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: this doesn't necessarily put PHY into low power mode,
+ * turning off its clock; that's a function of PHY integration and
+ * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
+ * SE0 changing to connect (J) or wakeup (K) states.
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_suspend) {
+ int retries = 10000;
+
+ power &= ~MUSB_POWER_RESUME;
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
+ power = musb_readb(mbase, MUSB_POWER);
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
+
+ DBG(3, "Root port suspended, power %02x\n", power);
+
+ musb->port1_status |= USB_PORT_STAT_SUSPEND;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#endif
+ default:
+ DBG(1, "bogus rh suspend? %s\n",
+ otg_state_string(musb));
+ }
+ } else if (power & MUSB_POWER_SUSPENDM) {
+ power &= ~MUSB_POWER_SUSPENDM;
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ DBG(3, "Root port resuming, power %02x\n", power);
+
+ /* later, GetPortStatus will stop RESUME signaling */
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies + msecs_to_jiffies(20);
+ }
+}
+
+static void musb_port_reset(struct musb *musb, bool do_reset)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+ DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ return;
+ }
+#endif
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: caller guarantees it will turn off the reset when
+ * the appropriate amount of time has passed
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_reset) {
+
+ /*
+		 * If RESUME is set, we must make sure it stays asserted for at least 20 ms.
+ * Then we must clear RESUME and wait a bit to let musb start
+ * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+ * fail with "Error! Did not receive an SOF before suspend
+ * detected".
+ */
+ if (power & MUSB_POWER_RESUME) {
+ while (time_before(jiffies, musb->rh_timer))
+ msleep(1);
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESUME);
+ msleep(1);
+ }
+
+ musb->ignore_disconnect = true;
+ power &= 0xf0;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESET);
+
+ musb->port1_status |= USB_PORT_STAT_RESET;
+ musb->port1_status &= ~USB_PORT_STAT_ENABLE;
+ musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ } else {
+ DBG(4, "root port reset stopped\n");
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESET);
+
+ musb->ignore_disconnect = false;
+
+ power = musb_readb(mbase, MUSB_POWER);
+ if (power & MUSB_POWER_HSMODE) {
+ DBG(4, "high-speed device connected\n");
+ musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+ }
+
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ musb->port1_status |= USB_PORT_STAT_ENABLE
+ | (USB_PORT_STAT_C_RESET << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+ musb->port1_status = (1 << USB_PORT_FEAT_POWER)
+ | (1 << USB_PORT_FEAT_C_CONNECTION);
+
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ musb->is_active = 0;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+ }
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int retval = 0;
+
+ /* called in_irq() via usb_hcd_poll_rh_status() */
+ if (musb->port1_status & 0xffff0000) {
+ *buf = 0x02;
+ retval = 1;
+ }
+ return retval;
+}
+
+int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u32 temp;
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ /* hub features: always zero, setting is a NOP
+ * port features: reported, sometimes updated when host is active
+ * no indicators
+ */
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, false);
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_set_vbus(musb, 0);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_SUSPEND:
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "clear feature %d\n", wValue);
+ musb->port1_status &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ {
+ struct usb_hub_descriptor *desc = (void *)buf;
+
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = __constant_cpu_to_le16(
+ 0x0001 /* per-port power switching */
+ | 0x0010 /* no overcurrent reporting */
+ );
+ desc->bPwrOn2PwrGood = 5; /* msec/2 */
+ desc->bHubContrCurrent = 0;
+
+ /* workaround bogus struct definition */
+ desc->DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->DeviceRemovable[1] = 0xff;
+ }
+ break;
+ case GetHubStatus:
+ temp = 0;
+ *(__le32 *) buf = cpu_to_le32(temp);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ /* finish RESET signaling? */
+ if ((musb->port1_status & USB_PORT_STAT_RESET)
+ && time_after_eq(jiffies, musb->rh_timer))
+ musb_port_reset(musb, false);
+
+ /* finish RESUME signaling? */
+ if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
+ && time_after_eq(jiffies, musb->rh_timer)) {
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(4, "root port resume stopped, power %02x\n",
+ power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /* ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+
+ put_unaligned(cpu_to_le32(musb->port1_status
+ & ~MUSB_PORT_STAT_RESUME),
+ (__le32 *) buf);
+
+ /* port change status is more interesting */
+ DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+ musb->port1_status);
+ break;
+ case SetPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /* NOTE: this controller has a strange state machine
+ * that involves "requesting sessions" according to
+ * magic side effects from incompletely-described
+ * rules about startup...
+ *
+ * This call is what really starts the host mode; be
+ * very careful about side effects if you reorder any
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_start(musb);
+ break;
+ case USB_PORT_FEAT_RESET:
+ musb_port_reset(musb, true);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, true);
+ break;
+ case USB_PORT_FEAT_TEST:
+ if (unlikely(is_host_active(musb)))
+ goto error;
+
+ wIndex >>= 8;
+ switch (wIndex) {
+ case 1:
+ pr_debug("TEST_J\n");
+ temp = MUSB_TEST_J;
+ break;
+ case 2:
+ pr_debug("TEST_K\n");
+ temp = MUSB_TEST_K;
+ break;
+ case 3:
+ pr_debug("TEST_SE0_NAK\n");
+ temp = MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ pr_debug("TEST_PACKET\n");
+ temp = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ break;
+ case 5:
+ pr_debug("TEST_FORCE_ENABLE\n");
+ temp = MUSB_TEST_FORCE_HOST
+ | MUSB_TEST_FORCE_HS;
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+ break;
+ case 6:
+ pr_debug("TEST_FIFO_ACCESS\n");
+ temp = MUSB_TEST_FIFO_ACCESS;
+ break;
+ default:
+ goto error;
+ }
+ musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "set feature %d\n", wValue);
+ musb->port1_status |= 1 << wValue;
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 0000000..9ba8fb7
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "musb_core.h"
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \
+		(MUSB_HSDMA_BASE + ((_bChannel) << 4) + (_offset))
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT 0
+#define MUSB_HSDMA_TRANSMIT_SHIFT 1
+#define MUSB_HSDMA_MODE1_SHIFT 2
+#define MUSB_HSDMA_IRQENABLE_SHIFT 3
+#define MUSB_HSDMA_ENDPOINT_SHIFT 4
+#define MUSB_HSDMA_BUSERROR_SHIFT 8
+#define MUSB_HSDMA_BURSTMODE_SHIFT 9
+#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
+#define MUSB_HSDMA_BURSTMODE_INCR4 1
+#define MUSB_HSDMA_BURSTMODE_INCR8 2
+#define MUSB_HSDMA_BURSTMODE_INCR16 3
+
+#define MUSB_HSDMA_CHANNELS 8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+ struct dma_channel Channel;
+ struct musb_dma_controller *controller;
+ u32 dwStartAddress;
+ u32 len;
+ u16 wMaxPacketSize;
+ u8 bIndex;
+ u8 epnum;
+ u8 transmit;
+};
+
+struct musb_dma_controller {
+ struct dma_controller Controller;
+ struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
+ void *pDmaPrivate;
+ void __iomem *pCoreBase;
+ u8 bChannelCount;
+ u8 bmUsedChannels;
+ u8 irq;
+};
+
+static int dma_controller_start(struct dma_controller *c)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+ struct musb *musb = (struct musb *) controller->pDmaPrivate;
+ struct dma_channel *pChannel;
+ u8 bBit;
+
+ if (controller->bmUsedChannels != 0) {
+ dev_err(musb->controller,
+ "Stopping DMA controller while channel active\n");
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (controller->bmUsedChannels & (1 << bBit)) {
+ pChannel = &controller->aChannel[bBit].Channel;
+ dma_channel_release(pChannel);
+
+ if (!controller->bmUsedChannels)
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 transmit)
+{
+ u8 bBit;
+ struct dma_channel *pChannel = NULL;
+ struct musb_dma_channel *pImplChannel = NULL;
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (!(controller->bmUsedChannels & (1 << bBit))) {
+ controller->bmUsedChannels |= (1 << bBit);
+ pImplChannel = &(controller->aChannel[bBit]);
+ pImplChannel->controller = controller;
+ pImplChannel->bIndex = bBit;
+ pImplChannel->epnum = hw_ep->epnum;
+ pImplChannel->transmit = transmit;
+ pChannel = &(pImplChannel->Channel);
+ pChannel->private_data = pImplChannel;
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ pChannel->max_len = 0x10000;
+ /* Tx => mode 1; Rx => mode 0 */
+ pChannel->desired_mode = transmit;
+ pChannel->actual_len = 0;
+ break;
+ }
+ }
+ return pChannel;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = 0;
+ pImplChannel->len = 0;
+
+ pImplChannel->controller->bmUsedChannels &=
+ ~(1 << pImplChannel->bIndex);
+
+ pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ struct musb_dma_controller *controller = pImplChannel->controller;
+ void __iomem *mbase = controller->pCoreBase;
+ u8 bChannel = pImplChannel->bIndex;
+ u16 csr = 0;
+
+ DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+ pChannel, packet_sz, dma_addr, len, mode);
+
+ if (mode) {
+ csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+ BUG_ON(len < packet_sz);
+
+ if (packet_sz >= 64) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 32) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR8
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 16) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR4
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ }
+ }
+
+ csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+ | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+ | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+ | (pImplChannel->transmit
+ ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+ : 0);
+
+ /* address/count */
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ dma_addr);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ len);
+
+ /* control (this should start things) */
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ csr);
+}
+
+static int dma_channel_program(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ pImplChannel->epnum,
+ pImplChannel->transmit ? "Tx" : "Rx",
+ packet_sz, dma_addr, len, mode);
+
+ BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ pChannel->status == MUSB_DMA_STATUS_BUSY);
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = dma_addr;
+ pImplChannel->len = len;
+ pImplChannel->wMaxPacketSize = packet_sz;
+ pChannel->status = MUSB_DMA_STATUS_BUSY;
+
+ if ((mode == 1) && (len >= packet_sz))
+ configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+ else
+ configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+
+ return true;
+}
+
+static int dma_channel_abort(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ u8 bChannel = pImplChannel->bIndex;
+ void __iomem *mbase = pImplChannel->controller->pCoreBase;
+ u16 csr;
+
+ if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
+ if (pImplChannel->transmit) {
+
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR));
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR),
+ csr);
+ } else {
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR));
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR),
+ csr);
+ }
+
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ 0);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ }
+ return 0;
+}
+
+static irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+ struct musb_dma_controller *controller =
+ (struct musb_dma_controller *)private_data;
+ struct musb_dma_channel *pImplChannel;
+ struct musb *musb = controller->pDmaPrivate;
+ void __iomem *mbase = controller->pCoreBase;
+ struct dma_channel *pChannel;
+ u8 bChannel;
+ u16 csr;
+ u32 dwAddress;
+ u8 int_hsdma;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ if (!int_hsdma)
+ goto done;
+
+ for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
+ if (int_hsdma & (1 << bChannel)) {
+ pImplChannel = (struct musb_dma_channel *)
+ &(controller->aChannel[bChannel]);
+ pChannel = &pImplChannel->Channel;
+
+ csr = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+ MUSB_HSDMA_CONTROL));
+
+ if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
+ pImplChannel->Channel.status =
+ MUSB_DMA_STATUS_BUS_ABORT;
+ else {
+ u8 devctl;
+
+ dwAddress = musb_readl(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(
+ bChannel,
+ MUSB_HSDMA_ADDRESS));
+ pChannel->actual_len = dwAddress
+ - pImplChannel->dwStartAddress;
+
+ DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ pChannel, pImplChannel->dwStartAddress,
+ dwAddress, pChannel->actual_len,
+ pImplChannel->len,
+ (pChannel->actual_len
+ < pImplChannel->len) ?
+ "=> reconfig 0" : "=> complete");
+
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+
+				/* channel done; a host-side Tx may still
+				 * need TXPKTRDY set by hand
+				 */
+ if ((devctl & MUSB_DEVCTL_HM)
+ && (pImplChannel->transmit)
+ && ((pChannel->desired_mode == 0)
+ || (pChannel->actual_len &
+ (pImplChannel->wMaxPacketSize - 1)))
+ ) {
+ /* Send out the packet */
+ musb_ep_select(mbase,
+ pImplChannel->epnum);
+ musb_writew(mbase, MUSB_EP_OFFSET(
+ pImplChannel->epnum,
+ MUSB_TXCSR),
+ MUSB_TXCSR_TXPKTRDY);
+ } else
+ musb_dma_completion(
+ musb,
+ pImplChannel->epnum,
+ pImplChannel->transmit);
+ }
+ }
+ }
+ retval = IRQ_HANDLED;
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller;
+
+ controller = container_of(c, struct musb_dma_controller, Controller);
+ if (!controller)
+ return;
+
+ if (controller->irq)
+ free_irq(controller->irq, c);
+
+ kfree(controller);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq(pdev, 1);
+
+ if (irq == 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->bChannelCount = MUSB_HSDMA_CHANNELS;
+ controller->pDmaPrivate = musb;
+ controller->pCoreBase = pCoreBase;
+
+ controller->Controller.start = dma_controller_start;
+ controller->Controller.stop = dma_controller_stop;
+ controller->Controller.channel_alloc = dma_channel_allocate;
+ controller->Controller.channel_release = dma_channel_release;
+ controller->Controller.channel_program = dma_channel_program;
+ controller->Controller.channel_abort = dma_channel_abort;
+
+ if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ musb->controller->bus_id, &controller->Controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ dma_controller_destroy(&controller->Controller);
+ return NULL;
+ }
+
+ controller->irq = irq;
+
+ return &controller->Controller;
+}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 0000000..298b22e
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+#ifdef CONFIG_ARCH_OMAP3430
+#define get_cpu_rev() 2
+#endif
+
+#define MUSB_TIMEOUT_A_WAIT_BCON 1100
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+ u8 power;
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* finish RESUME signaling? */
+ if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(1, "root port resume stopped, power %02x\n", power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ else
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#endif
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
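+ /*
+ * last_timer remembers the expiry we armed most recently; if a timer
+ * with a later expiry is already pending, keep it rather than
+ * rearming with a shorter one.
+ */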
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ DBG(4, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+}
+void musb_platform_disable(struct musb *musb)
+{
+}
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void omap_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ musb->is_active = 1;
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+{
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb);
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ switch (musb_mode) {
+ case MUSB_HOST:
+ otg_set_host(&musb->xceiv, musb->xceiv.host);
+ break;
+ case MUSB_PERIPHERAL:
+ otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+ break;
+ case MUSB_OTG:
+ break;
+ }
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ u32 l;
+
+#if defined(CONFIG_ARCH_OMAP2430)
+ omap_cfg_reg(AE5_2430_USB0HS_STP);
+#endif
+
+ musb_platform_resume(musb);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ l &= ~NOSTDBY; /* remove possible nostdby */
+ l |= SMARTSTDBY; /* enable smart standby */
+ l &= ~AUTOIDLE; /* disable auto idle */
+ l &= ~NOIDLE; /* remove possible noidle */
+ l |= SMARTIDLE; /* enable smart idle */
+ l |= AUTOIDLE; /* enable auto idle */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_INTERFSEL);
+ l |= ULPI_12PIN;
+ omap_writel(l, OTG_INTERFSEL);
+
+ pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
+ omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+ omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+ omap_readl(OTG_SIMENABLE));
+
+ omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = omap_set_vbus;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = omap_set_power;
+ musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ /* in any role */
+ l = omap_readl(OTG_FORCESTDBY);
+ l |= ENABLEFORCE; /* enable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l |= ENABLEWAKEUP; /* enable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 1);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 0);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_FORCESTDBY);
+ l &= ~ENABLEFORCE; /* disable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ return 0;
+}
+
+
+int musb_platform_exit(struct musb *musb)
+{
+
+ omap_vbus_power(musb, 0 /*off*/, 1);
+
+ musb_platform_suspend(musb);
+
+ clk_put(musb->clock);
+ musb->clock = 0;
+
+ return 0;
+}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 0000000..786a620
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include <asm/arch/hardware.h>
+#include <asm/arch/usb.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET 0
+#if defined(CONFIG_ARCH_OMAP2430)
+#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
+#elif defined(CONFIG_ARCH_OMAP3430)
+#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
+#endif
+#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
+#define OTG_REVISION OMAP_HSOTG(0x0)
+#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+# define MIDLEMODE 12 /* bit position */
+# define FORCESTDBY (0 << MIDLEMODE)
+# define NOSTDBY (1 << MIDLEMODE)
+# define SMARTSTDBY (2 << MIDLEMODE)
+# define SIDLEMODE 3 /* bit position */
+# define FORCEIDLE (0 << SIDLEMODE)
+# define NOIDLE (1 << SIDLEMODE)
+# define SMARTIDLE (2 << SIDLEMODE)
+# define ENABLEWAKEUP (1 << 2)
+# define SOFTRST (1 << 1)
+# define AUTOIDLE (1 << 0)
+#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+# define RESETDONE (1 << 0)
+#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+# define EXTCP (1 << 2)
+# define PHYSEL 0 /* bit position */
+# define UTMI_8BIT (0 << PHYSEL)
+# define ULPI_12PIN (1 << PHYSEL)
+# define ULPI_8PIN (2 << PHYSEL)
+#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+# define TM1 (1 << 0)
+#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
+# define ENABLEFORCE (1 << 0)
+
+#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
+
+#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 0000000..b73b036
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ * configured for NOR FLASH interface instead of VLYNQ serial
+ * interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+static void tusb_source_power(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
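+/* e.g. a raw revision value of 0x31 decodes as major 3, minor 1 (TUSB 3.1) */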
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+u8 tusb_get_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 die_id;
+ u8 rev;
+
+ rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+ if (TUSB_REV_MAJOR(rev) == 3) {
+ die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+ TUSB_DIDR1_HI));
+ if (die_id >= TUSB_DIDR1_HI_REV_31)
+ rev |= 1;
+ }
+
+ return rev;
+}
+
+static int __init tusb_print_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u8 rev;
+
+ rev = tusb_get_revision(musb);
+
+ pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+ "prcm",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ "int",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ "gpio",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ "dma",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ "dieid",
+ TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+ "rev",
+ TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+
+ return tusb_get_revision(musb);
+}
+
+#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+ | TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ static u32 phy_otg_ctrl, phy_otg_ena;
+ u32 tmp;
+
+ if (enabled) {
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+ | phy_otg_ena | WBUS_QUIRK_MASK;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+ tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+ & TUSB_PHY_OTG_CTRL_TESTM2) {
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ phy_otg_ctrl = 0;
+ phy_otg_ena = 0;
+ }
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+ u32 val;
+ int i;
+
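+ /*
+ * Copy whole 32-bit words first; any 1-3 byte remainder still goes
+ * out as one full word write, since the bus has no byte enables.
+ */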
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ memcpy(&val, buf, 4);
+ musb_writel(fifo, 0, val);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+ /* Write the remaining 1-3 bytes to the FIFO */
+ memcpy(&val, buf, len);
+ musb_writel(fifo, 0, val);
+ }
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+ void __iomem *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, 4);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+ /* Read the remaining 1-3 bytes from the FIFO */
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, len);
+ }
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ prefetch(buf);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+ TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ writesl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use writesw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = (u32)(*(u16 *)buf);
+ buf += 2;
+ val |= (*(u16 *)buf) << 16;
+ buf += 2;
+ musb_writel(fifo, 0, val);
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_write_unaligned(fifo, buf, len);
+}
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ readsl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use readsw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ *(u16 *)buf = (u16)(val & 0xffff);
+ buf += 2;
+ *(u16 *)buf = (u16)(val >> 16);
+ buf += 2;
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* This is used by gadget drivers, and OTG transceiver logic, allowing
+ * at most mA current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
+static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+{
+ struct musb *musb = container_of(x, struct musb, xceiv);
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ /*
+ * Keep clock active when enabled. Note that this is not tied to
+ * drawing VBUS, as with OTG, mA can be less than musb->min_power.
+ */
+ if (musb->set_clock) {
+ if (mA)
+ musb->set_clock(musb->clock, 1);
+ else
+ musb->set_clock(musb->clock, 0);
+ }
+
+ /* tps65030 seems to consume max 100mA, with maybe 60mA available
+ * (measured on one board) for things other than tps and tusb.
+ *
+ * Boards sharing the CPU clock with CLKIN will need to prevent
+ * certain idle sleep states while the USB link is active.
+ *
+ * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+ * The actual current usage would be very board-specific. For now,
+ * it's simpler to just use an aggregate (also board-specific).
+ */
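+ /*
+ * min_power appears to be in the 2 mA units used by USB configuration
+ * descriptors, so doubling it here converts the threshold to mA.
+ */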
+ if (x->default_a || mA < (musb->min_power << 1))
+ mA = 0;
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ if (mA) {
+ musb->is_bus_powered = 1;
+ reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+ } else {
+ musb->is_bus_powered = 0;
+ reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+ }
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(2, "draw max %d mA VBUS\n", mA);
+ return 0;
+}
+
+#else
+#define tusb_draw_power NULL
+#endif
+
+/* workaround for issue 13: change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ reg = musb_readl(tbase, TUSB_PRCM_CONF);
+ reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+ /* 0 = refclk (clkin, XI)
+ * 1 = PHY 60 MHz (internal PLL)
+ * 2 = not supported
+ * 3 = what?
+ */
+ if (mode > 0)
+ reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+ musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+ /* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events. SW_EN for voltage is handled separately.
+ */
+void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ if ((wakeup_enables & TUSB_PRCM_WBUS)
+ && (tusb_get_revision(musb) == TUSB_REV_30))
+ tusb_wbus_quirk(musb, 1);
+
+ tusb_set_clock_source(musb, 0);
+
+ wakeup_enables |= TUSB_PRCM_WNORCS;
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+ /* REVISIT writeup of WID implies that if WID set and ID is grounded,
+ * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+ * Presumably that's mostly to save power, hence WID is immaterial ...
+ */
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+ if (is_host_active(musb)) {
+ reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ } else {
+ reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ }
+ reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(6, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, prcm_mngmt;
+ int ret = 0;
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+ /* Temporarily enable VBUS detection if it was disabled for
+ * suspend mode. Unless it's enabled otg_stat and devctl will
+ * not show correct VBUS state.
+ */
+ if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+ u32 tmp = prcm_mngmt;
+ tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+ }
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+ ret = 1;
+
+ return ret;
+}
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if ((musb->a_wait_bcon != 0)
+ && (musb->idle_timeout == 0
+ || time_after(jiffies, musb->idle_timeout))) {
+ DBG(4, "Nothing connected %s, turning off VBUS\n",
+ otg_state_string(musb));
+ }
+ /* FALLTHROUGH */
+ case OTG_STATE_A_IDLE:
+ tusb_source_power(musb, 0);
+ default:
+ break;
+ }
+
+ if (!musb->is_active) {
+ u32 wakeups;
+
+ /* wait until khubd handles port change status */
+ if (is_host_active(musb) && (musb->port1_status >> 16))
+ goto done;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+ wakeups = 0;
+ else {
+ wakeups = TUSB_PRCM_WHOSTDISCON
+ | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WVBUS;
+ if (is_otg_enabled(musb))
+ wakeups |= TUSB_PRCM_WID;
+ }
+#else
+ wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
+#endif
+ tusb_allow_idle(musb, wakeups);
+ }
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
+ * like "disconnected" or "suspended". We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called as the last function everywhere where there is
+ * register access to TUSB6010 because of NOR flash wake-up.
+ * Caller should own controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ DBG(4, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK 60000000
+#define OTG_TIMER_MS(msecs) ((msecs) \
+ ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+ | TUSB_DEV_OTG_TIMER_ENABLE) \
+ : 0)
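+/*
+ * For example, OTG_TIMER_MS(100) loads (60000000 / 1000) * 100 = 6000000
+ * clock ticks and sets TUSB_DEV_OTG_TIMER_ENABLE.
+ */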
+
+static void tusb_source_power(struct musb *musb, int is_on)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 conf, prcm, timer;
+ u8 devctl;
+
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ conf = musb_readl(tbase, TUSB_DEV_CONF);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+ MUSB_HST_MODE(musb);
+ } else {
+ u32 otg_stat;
+
+ timer = 0;
+
+ /* If ID pin is grounded, we want to be a_idle */
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ }
+ musb->is_active = 0;
+ musb->xceiv.default_a = 1;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ }
+ prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+ musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+ musb_writel(tbase, TUSB_DEV_CONF, conf);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ musb_readl(tbase, TUSB_DEV_OTG_STAT),
+ conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ *
+ * REVISIT: It would be possible to add support for changing between host
+ * and peripheral modes in non-OTG configurations by reconfiguring hardware
+ * and then setting musb->board_mode. For now, only support OTG mode.
+ */
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+ if (musb->board_mode != MUSB_OTG) {
+ ERR("Changing mode currently only supported in OTG mode\n");
+ return;
+ }
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+ switch (musb_mode) {
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case MUSB_HOST: /* Disable PHY ID detect, ground ID */
+ phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= TUSB_DEV_CONF_ID_SEL;
+ dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+ break;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+ case MUSB_OTG: /* Use PHY ID detection */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+ default:
+ DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+ }
+
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+ musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if ((musb_mode == MUSB_PERIPHERAL) &&
+ !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+ INFO("Cannot be peripheral with mini-A cable "
+ "otg_stat: %08x\n", otg_stat);
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+ u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ unsigned long idle_timeout = 0;
+
+ /* ID pin */
+ if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+ int default_a;
+
+ if (is_otg_enabled(musb))
+ default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+ else
+ default_a = is_host_enabled(musb);
+ DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
+ musb->xceiv.default_a = default_a;
+ tusb_source_power(musb, default_a);
+
+ /* Don't allow idling immediately */
+ if (default_a)
+ idle_timeout = jiffies + (HZ * 3);
+ }
+
+ /* VBUS state change */
+ if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+ /* B-dev state machine: no vbus ~= disconnect */
+ if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+ || !is_host_enabled(musb)) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* ? musb_root_disconnect(musb); */
+ musb->port1_status &=
+ ~(USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED
+ | USB_PORT_STAT_TEST
+ );
+#endif
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+ DBG(1, "Forcing disconnect (no interrupt)\n");
+ if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+ /* INTR_DISCONNECT can hide... */
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ }
+ musb->is_active = 0;
+ }
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+ idle_timeout = jiffies + (1 * HZ);
+ schedule_work(&musb->irq_work);
+
+ } else /* A-dev state machine */ {
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE:
+ DBG(2, "Got SRP, turning on VBUS\n");
+ musb_set_vbus(musb, 1);
+
+ /* CONNECT can wake if a_wait_bcon is set */
+ if (musb->a_wait_bcon != 0)
+ musb->is_active = 0;
+ else
+ musb->is_active = 1;
+
+ /*
+ * OPT FS A TD.4.6 needs few seconds for
+ * A_WAIT_VRISE
+ */
+ idle_timeout = jiffies + (2 * HZ);
+
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ignore; A-session-valid < VBUS_VALID/2,
+ * we monitor this with the timer
+ */
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /* REVISIT this irq triggers during short
+ * spikes caused by enumeration ...
+ */
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ tusb_source_power(musb, 1);
+ } else {
+ musb->vbuserr_retry
+ = VBUSERR_RETRY_COUNT;
+ tusb_source_power(musb, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* OTG timer expiration */
+ if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+ u8 devctl;
+
+ DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ /* VBUS has probably been valid for a while now,
+ * but may well have bounced out of range a bit
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != MUSB_DEVCTL_VBUS) {
+ DBG(2, "devctl %02x\n", devctl);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ } else {
+ /* REVISIT report overcurrent to hub? */
+ ERR("vbus too slow, devctl %02x\n", devctl);
+ tusb_source_power(musb, 0);
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ break;
+ case OTG_STATE_A_SUSPEND:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ break;
+ default:
+ break;
+ }
+ }
+ schedule_work(&musb->irq_work);
+
+ return idle_timeout;
+}
+
+static irqreturn_t tusb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ void __iomem *tbase = musb->ctrl_base;
+ unsigned long flags, idle_timeout = 0;
+ u32 int_mask, int_src;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Mask all interrupts to allow using both edge and level GPIO irq */
+ int_mask = musb_readl(tbase, TUSB_INT_MASK);
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+ DBG(3, "TUSB IRQ %08x\n", int_src);
+
+ musb->int_usb = (u8) int_src;
+
+ /* Acknowledge wake-up source interrupts */
+ if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+ u32 reg;
+ u32 i;
+
+ if (tusb_get_revision(musb) == TUSB_REV_30)
+ tusb_wbus_quirk(musb, 0);
+
+ /* there are issues re-locking the PLL on wakeup ... */
+
+ /* work around issue 8 */
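+ /*
+ * Keep rewriting the scratch pad until a written value reads
+ * back, which confirms the NOR interface is awake again.
+ */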
+ for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+ musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+ musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+ reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+ if (reg == i)
+ break;
+ DBG(6, "TUSB NOR not ready\n");
+ }
+
+ /* work around issue 13 (2nd half) */
+ tusb_set_clock_source(musb, 1);
+
+ reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+ if (reg & ~TUSB_PRCM_WNORCS) {
+ musb->is_active = 1;
+ schedule_work(&musb->irq_work);
+ }
+ DBG(3, "wake %sactive %02x\n",
+ musb->is_active ? "" : "in", reg);
+
+ /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+ }
+
+ if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+ del_timer(&musb_idle_timer);
+
+ /* OTG state change reports (annoyingly) not issued by Mentor core */
+ if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+ | TUSB_INT_SRC_OTG_TIMEOUT
+ | TUSB_INT_SRC_ID_STATUS_CHNG))
+ idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+ /* TX dma callback must be handled here, RX dma callback is
+ * handled in tusb_omap_dma_cb.
+ */
+ if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+ u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+ u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
+
+ DBG(3, "DMA IRQ %08x\n", dma_src);
+ real_dma_src = ~real_dma_src & dma_src;
+ if (tusb_dma_omap() && real_dma_src) {
+ int tx_source = (real_dma_src & 0xffff);
+ int i;
+
+ for (i = 1; i <= 15; i++) {
+ if (tx_source & (1 << i)) {
+ DBG(3, "completing ep%i %s\n", i, "tx");
+ musb_dma_completion(musb, i, 1);
+ }
+ }
+ }
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+ }
+
+ /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+ u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
+ musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+ musb->int_tx = (musb_src & 0xffff);
+ } else {
+ musb->int_rx = 0;
+ musb->int_tx = 0;
+ }
+
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+ musb_interrupt(musb);
+
+ /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+ int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+ musb_platform_try_idle(musb, idle_timeout);
+
+ musb_writel(tbase, TUSB_INT_MASK, int_mask);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+void musb_platform_enable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+ musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+ /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ /* Clear all subsystem interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+ /* Acknowledge pending interrupt(s) */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ /* Only a minimum interrupt de-assertion time of 0 clock cycles and
+ * active-low interrupt polarity seem to work reliably here */
+ musb_writel(tbase, TUSB_INT_CTRL_CONF,
+ TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+ set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+ /* maybe force into the Default-A OTG state machine */
+ if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+ & TUSB_DEV_OTG_STAT_ID_STATUS))
+ musb_writel(tbase, TUSB_INT_SRC_SET,
+ TUSB_INT_SRC_ID_STATUS_CHNG);
+
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* FIXME stop DMA, IRQs, timers, ... */
+
+ /* disable all IRQs */
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ del_timer(&musb_idle_timer);
+
+ if (is_dma_capable() && !dma_off) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __func__);
+ dma_off = 1;
+ }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void __init tusb_setup_cpu_interface(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /*
+ * Disable GPIO[5:0] pullups (used as output DMA requests)
+ * Don't disable GPIO[7:6] as they are needed for wake-up.
+ */
+ musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+ /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+ musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+ /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+ musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+ /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+ * de-assertion time 2 system clocks p 62 */
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ /* Set 0 wait count for synchronous burst access */
+ musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int __init tusb_start(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ int ret = 0;
+ unsigned long flags;
+ u32 reg;
+
+ if (musb->board_set_power)
+ ret = musb->board_set_power(1);
+ if (ret != 0) {
+ printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+ TUSB_PROD_TEST_RESET_VAL) {
+ printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+ goto err;
+ }
+
+ ret = tusb_print_revision(musb);
+ if (ret < 2) {
+ printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+ ret);
+ goto err;
+ }
+
+ /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+ * NOR FLASH interface is used */
+ musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+ /* Select PHY free running 60MHz as a system clock */
+ tusb_set_clock_source(musb, 1);
+
+ /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+ * power saving, enable VBus detect and session end comparators,
+ * enable IDpullup, enable VBus charging */
+ musb_writel(tbase, TUSB_PRCM_MNGMT,
+ TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+ TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+ TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+ TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+ TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+ tusb_setup_cpu_interface(musb);
+
+ /* simplify: always sense/pullup ID pins, as if in OTG mode */
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return -ENODEV;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ struct platform_device *pdev;
+ struct resource *mem;
+ void __iomem *sync;
+ int ret;
+
+ pdev = to_platform_device(musb->controller);
+
+ /* dma address for async dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ musb->async = mem->start;
+
+ /* dma address for sync dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ pr_debug("no sync dma resource?\n");
+ return -ENODEV;
+ }
+ musb->sync = mem->start;
+
+ sync = ioremap(mem->start, mem->end - mem->start + 1);
+ if (!sync) {
+ pr_debug("ioremap for sync failed\n");
+ return -ENOMEM;
+ }
+ musb->sync_va = sync;
+
+ /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+ * FIFOs at 0x600, TUSB at 0x800
+ */
+ musb->mregs += TUSB_BASE_OFFSET;
+
+ ret = tusb_start(musb);
+ if (ret) {
+ printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+ ret);
+ return -ENODEV;
+ }
+ musb->isr = tusb_interrupt;
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = tusb_source_power;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = tusb_draw_power;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ del_timer_sync(&musb_idle_timer);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ iounmap(musb->sync_va);
+
+ return 0;
+}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 0000000..ab8c962
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+extern u8 tusb_get_revision(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb() 1
+#else
+#define musb_in_tusb() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL 0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET 0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE 0x800
+
+#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
+#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
+#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
+#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
+#define TUSB_DEV_CONF_ID_SEL (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
+#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
+#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
+#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
+#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
+#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
+#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
+#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
+#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
+#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
+#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
+#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
+#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
+#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
+#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
+#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
+#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
+#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
+#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
+#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
+#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
+#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
+#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
+#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
+#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
+#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
+#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
+#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
+#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
+#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
+#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
+#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
+# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
+# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
+#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
+#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
+#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
+#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
+#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
+#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
+#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
+#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
+#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
+#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
+#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
+#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
+#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
+#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
+#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
+#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
+#define TUSB_PRCM_WGPIO_7 (1 << 12)
+#define TUSB_PRCM_WGPIO_6 (1 << 11)
+#define TUSB_PRCM_WGPIO_5 (1 << 10)
+#define TUSB_PRCM_WGPIO_4 (1 << 9)
+#define TUSB_PRCM_WGPIO_3 (1 << 8)
+#define TUSB_PRCM_WGPIO_2 (1 << 7)
+#define TUSB_PRCM_WGPIO_1 (1 << 6)
+#define TUSB_PRCM_WGPIO_0 (1 << 5)
+#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
+#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
+#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
+#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
+#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
+#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
+#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
+#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
+#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
+#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
+#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
+#define TUSB_INT_SRC_DEV_READY (1 << 12)
+#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
+#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
+#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
+#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
+#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
+#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
+#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
+#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
+#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
+#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
+#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
+#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
+#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
+#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
+ TUSB_INT_MASK_RESERVED_13 | \
+ TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
+ TUSB_INT_SRC_RESERVED_18 | \
+ TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL 0xa596
+#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
+
+#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
+#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf)
+#define TUSB_DIDR1_HI_REV_20 0
+#define TUSB_DIDR1_HI_REV_30 1
+#define TUSB_DIDR1_HI_REV_31 2
+
+#define TUSB_REV_10 0x10
+#define TUSB_REV_20 0x20
+#define TUSB_REV_30 0x30
+#define TUSB_REV_31 0x31
+
+#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 0000000..52f7f29
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+
+#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+ struct musb *musb;
+ void __iomem *tbase;
+ unsigned long phys_offset;
+ int epnum;
+ u8 tx;
+ struct musb_hw_ep *hw_ep;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ struct tusb_omap_dma *tusb_dma;
+
+ void __iomem *dma_addr;
+
+ u32 len;
+ u16 packet_sz;
+ u16 transfer_packet_sz;
+ u32 transfer_len;
+ u32 completed_len;
+};
+
+struct tusb_omap_dma {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *tbase;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+ unsigned multichannel:1;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if (reg != 0) {
+ DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return -EAGAIN;
+ }
+
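+ /* EP_MAP: bit 4 selects TX, the low nibble holds the endpoint number */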
+ if (chdat->tx)
+ reg = (1 << 4) | chdat->epnum;
+ else
+ reg = chdat->epnum;
+
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if ((reg & 0xf) != chdat->epnum) {
+ printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return;
+ }
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
+
+/*
+ * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct dma_channel *channel = (struct dma_channel *)data;
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *mbase = musb->mregs;
+ unsigned long remaining, flags, pio;
+ int ch;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (tusb_dma->multichannel)
+ ch = chdat->ch;
+ else
+ ch = tusb_dma->ch;
+
+ if (ch_status != OMAP_DMA_BLOCK_IRQ)
+ printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+ DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, ch_status);
+
+ if (chdat->tx)
+ remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+ /* HW issue #10: XFR_SIZE may get corrupted on DMA (both async & sync) */
+ if (unlikely(remaining > chdat->transfer_len)) {
+ DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ remaining);
+ remaining = 0;
+ }
+
+ channel->actual_len = chdat->transfer_len - remaining;
+ pio = chdat->len - channel->actual_len;
+
+ DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+ /* Transfer remaining 1 - 31 bytes */
+ if (pio > 0 && pio < 32) {
+ u8 *buf;
+
+ DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+ buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+ if (chdat->tx) {
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_TO_DEVICE);
+ musb_write_fifo(hw_ep, pio, buf);
+ } else {
+ musb_read_fifo(hw_ep, pio, buf);
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_FROM_DEVICE);
+ }
+ channel->actual_len += pio;
+ }
+
+ if (!tusb_dma->multichannel)
+ tusb_omap_free_shared_dmareq(chdat);
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ /* Handle only RX callbacks here. TX callbacks must be handled based
+ * on the TUSB DMA status interrupt.
+ * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+ * interrupt for RX and TX.
+ */
+ if (!chdat->tx)
+ musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+ /* We must terminate short tx transfers manually by setting TXPKTRDY.
+ * REVISIT: This same problem may occur with other MUSB dma as well.
+ * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+ */
+ if ((chdat->transfer_len < chdat->packet_sz)
+ || (chdat->transfer_len % chdat->packet_sz != 0)) {
+ u16 csr;
+
+ if (chdat->tx) {
+ DBG(3, "terminating short tx packet\n");
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ }
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *ep_conf = hw_ep->conf;
+ dma_addr_t fifo = hw_ep->fifo_sync;
+ struct omap_dma_channel_params dma_params;
+ u32 dma_remaining;
+ int src_burst, dst_burst;
+ u16 csr;
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+ return false;
+
+ /*
+ * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+ * register, which will cause missed DMA interrupts. We could try to
+ * use a timer for the callback, but it is unsafe as the XFR_SIZE
+ * register is corrupt, and we won't know if the DMA worked.
+ */
+ if (dma_addr & 0x2)
+ return false;
+
+ /*
+ * Because of HW issue #10, it seems like mixing sync DMA and async
+ * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+ * using the channel for DMA.
+ */
+ if (chdat->tx)
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+ if (dma_remaining) {
+ DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ dma_remaining);
+ return false;
+ }
+
+ chdat->transfer_len = len & ~0x1f;
+
+ if (len < packet_sz)
+ chdat->transfer_packet_sz = chdat->transfer_len;
+ else
+ chdat->transfer_packet_sz = packet_sz;
+
+ if (tusb_dma->multichannel) {
+ ch = chdat->ch;
+ dmareq = chdat->dmareq;
+ sync_dev = chdat->sync_dev;
+ } else {
+ if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+ DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+ return false;
+ }
+ if (tusb_dma->ch < 0) {
+ /* REVISIT: This should get blocked earlier, happens
+ * with MSC ErrorRecoveryTest
+ */
+ WARN_ON(1);
+ return false;
+ }
+
+ ch = tusb_dma->ch;
+ dmareq = tusb_dma->dmareq;
+ sync_dev = tusb_dma->sync_dev;
+ omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+ }
+
+ chdat->packet_sz = packet_sz;
+ chdat->len = len;
+ channel->actual_len = 0;
+ chdat->dma_addr = (void __iomem *)dma_addr;
+ channel->status = MUSB_DMA_STATUS_BUSY;
+
+ /* Since we're recycling dma areas, we need to clean or invalidate */
+ if (chdat->tx)
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+ else
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+
+ /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+ if ((dma_addr & 0x3) == 0) {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = 8; /* Elements in frame */
+ } else {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ dma_params.elem_count = 16; /* Elements in frame */
+ fifo = hw_ep->fifo_async;
+ }
+
+ dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
+
+ DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, dma_addr, chdat->transfer_len, len,
+ chdat->transfer_packet_sz, packet_sz);
+
+ /*
+ * Prepare omap DMA for transfer
+ */
+ if (chdat->tx) {
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.src_start = (unsigned long)dma_addr;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.dst_start = (unsigned long)fifo;
+ dma_params.dst_ei = 1;
+ dma_params.dst_fi = -31; /* Loop 32 byte window */
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 0; /* Dest sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */
+ } else {
+ dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.src_start = (unsigned long)fifo;
+ dma_params.src_ei = 1;
+ dma_params.src_fi = -31; /* Loop 32 byte window */
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.dst_start = (unsigned long)dma_addr;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 1; /* Source sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
+ }
+
+ DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+ ((dma_addr & 0x3) == 0) ? "sync" : "async",
+ dma_params.src_start, dma_params.dst_start);
+
+ omap_set_dma_params(ch, &dma_params);
+ omap_set_dma_src_burst_mode(ch, src_burst);
+ omap_set_dma_dest_burst_mode(ch, dst_burst);
+ omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+ /*
+ * Prepare MUSB for DMA transfer
+ */
+ if (chdat->tx) {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ } else {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_DMAENAB;
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+ musb_writew(hw_ep->regs, MUSB_RXCSR,
+ csr | MUSB_RXCSR_P_WZC_BITS);
+ }
+
+ /*
+ * Start DMA transfer
+ */
+ omap_start_dma(ch);
+
+ if (chdat->tx) {
+ /* Send transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz);
+
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ } else {
+ /* Receive transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz << 16);
+
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ }
+
+ return true;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+
+ if (!tusb_dma->multichannel) {
+ if (tusb_dma->ch >= 0) {
+ omap_stop_dma(tusb_dma->ch);
+ omap_free_dma(tusb_dma->ch);
+ tusb_dma->ch = -1;
+ }
+
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ int i, dmareq_nr = -1;
+
+ const int sync_dev[6] = {
+ OMAP24XX_DMA_EXT_DMAREQ0,
+ OMAP24XX_DMA_EXT_DMAREQ1,
+ OMAP242X_DMA_EXT_DMAREQ2,
+ OMAP242X_DMA_EXT_DMAREQ3,
+ OMAP242X_DMA_EXT_DMAREQ4,
+ OMAP242X_DMA_EXT_DMAREQ5,
+ };
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+ if (cur == 0) {
+ dmareq_nr = i;
+ break;
+ }
+ }
+
+ if (dmareq_nr == -1)
+ return -EAGAIN;
+
+ reg |= (chdat->epnum << (dmareq_nr * 5));
+ if (chdat->tx)
+ reg |= ((1 << 4) << (dmareq_nr * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = dmareq_nr;
+ chdat->sync_dev = sync_dev[chdat->dmareq];
+
+ return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg;
+
+ if (!chdat || chdat->dmareq < 0)
+ return;
+
+ reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ reg &= ~(0x1f << (chdat->dmareq * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = -1;
+ chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 tx)
+{
+ int ret, i;
+ const char *dev_name;
+ struct tusb_omap_dma *tusb_dma;
+ struct musb *musb;
+ void __iomem *tbase;
+ struct dma_channel *channel = NULL;
+ struct tusb_omap_dma_ch *chdat = NULL;
+ u32 reg;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ musb = tusb_dma->musb;
+ tbase = musb->ctrl_base;
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (tx)
+ reg &= ~(1 << hw_ep->epnum);
+ else
+ reg &= ~(1 << (hw_ep->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ /* REVISIT: Why does dmareq5 not work? */
+ if (hw_ep->epnum == 0) {
+ DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ return NULL;
+ }
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+ ch->status = MUSB_DMA_STATUS_FREE;
+ channel = ch;
+ chdat = ch->private_data;
+ break;
+ }
+ }
+
+ if (!channel)
+ return NULL;
+
+ if (tx) {
+ chdat->tx = 1;
+ dev_name = "TUSB transmit";
+ } else {
+ chdat->tx = 0;
+ dev_name = "TUSB receive";
+ }
+
+ chdat->musb = tusb_dma->musb;
+ chdat->tbase = tusb_dma->tbase;
+ chdat->hw_ep = hw_ep;
+ chdat->epnum = hw_ep->epnum;
+ chdat->dmareq = -1;
+ chdat->completed_len = 0;
+ chdat->tusb_dma = tusb_dma;
+
+ channel->max_len = 0x7fffffff;
+ channel->desired_mode = 0;
+ channel->actual_len = 0;
+
+ if (tusb_dma->multichannel) {
+ ret = tusb_omap_dma_allocate_dmareq(chdat);
+ if (ret != 0)
+ goto free_dmareq;
+
+ ret = omap_request_dma(chdat->sync_dev, dev_name,
+ tusb_omap_dma_cb, channel, &chdat->ch);
+ if (ret != 0)
+ goto free_dmareq;
+ } else if (tusb_dma->ch == -1) {
+ tusb_dma->dmareq = 0;
+ tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+ /* Callback data gets set later in the shared dmareq case */
+ ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+ tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+ if (ret != 0)
+ goto free_dmareq;
+
+ chdat->dmareq = -1;
+ chdat->ch = -1;
+ }
+
+ DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+ chdat->epnum,
+ chdat->tx ? "tx" : "rx",
+ chdat->ch >= 0 ? "dedicated" : "shared",
+ chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+ chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+ chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+ return channel;
+
+free_dmareq:
+ tusb_omap_dma_free_dmareq(chdat);
+
+ DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct musb *musb = chdat->musb;
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ if (chdat->ch >= 0) {
+ omap_stop_dma(chdat->ch);
+ omap_free_dma(chdat->ch);
+ chdat->ch = -1;
+ }
+
+ if (chdat->dmareq >= 0)
+ tusb_omap_dma_free_dmareq(chdat);
+
+ channel = NULL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch) {
+ kfree(ch->private_data);
+ kfree(ch);
+ }
+ }
+
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
+ omap_free_dma(tusb_dma->ch);
+
+ kfree(tusb_dma);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ /* REVISIT: Get dmareq lines used from board-*.c */
+
+ musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+ if (!tusb_dma)
+ goto cleanup;
+
+ tusb_dma->musb = musb;
+ tusb_dma->tbase = musb->ctrl_base;
+
+ tusb_dma->ch = -1;
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+
+ tusb_dma->controller.start = tusb_omap_dma_start;
+ tusb_dma->controller.stop = tusb_omap_dma_stop;
+ tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+ tusb_dma->controller.channel_release = tusb_omap_dma_release;
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+ if (tusb_get_revision(musb) >= TUSB_REV_30)
+ tusb_dma->multichannel = 1;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch;
+ struct tusb_omap_dma_ch *chdat;
+
+ ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+ if (!ch)
+ goto cleanup;
+
+ dma_channel_pool[i] = ch;
+
+ chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+ if (!chdat)
+ goto cleanup;
+
+ ch->status = MUSB_DMA_STATUS_UNKNOWN;
+ ch->private_data = chdat;
+ }
+
+ return &tusb_dma->controller;
+
+cleanup:
+ dma_controller_destroy(&tusb_dma->controller);
+
+ return NULL;
+}
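The comments in tusb_omap_dma_program() and tusb_omap_dma_cb() above describe the transfer split this glue relies on: the OMAP DMA only ever moves whole 32-byte frames (transfer_len = len & ~0x1f, frame_count = transfer_len / 32), and any 1-31 byte tail is finished with PIO from the completion callback. A compact restatement of that arithmetic, for illustration only (the helper and variable names are assumptions):

static void example_tusb_split(u32 len)
{
        u32 dma_len  = len & ~0x1f;     /* whole 32-byte frames, moved by DMA */
        u32 frames   = dma_len / 32;    /* becomes dma_params.frame_count */
        u32 pio_tail = len - dma_len;   /* 0..31 bytes written/read through the
                                         * FIFO in tusb_omap_dma_cb() */

        pr_debug("len %u -> %u DMA frames + %u byte PIO tail\n",
                 len, frames, pio_tail);
}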
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c17..70338f4 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED
config USB_SERIAL_SIERRAWIRELESS
tristate "USB Sierra Wireless Driver"
help
- Say M here if you want to use a Sierra Wireless device (if
- using an PC 5220 or AC580 please use the Airprime driver
- instead).
+ Say M here if you want to use Sierra Wireless devices.
+
+ Many devices have a feature known as TRU-Install; for those devices
+ to work properly, the USB Storage Sierra feature must be enabled.
To compile this driver as a module, choose M here: the
module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8387172..984f6ef 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ }, /* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea4..382265b 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
/*
* Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI FT2232C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+
/* www.elsterelectricity.com Elster Unicom III Optical Probe */
#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
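The two Luminary Micro entries above are bound through ftdi_jtag_quirk so that only the UART side of the dual-channel chip is claimed; channel A is a 245-style FIFO wired to the board's JTAG debugger. The quirk itself lives elsewhere in ftdi_sio.c; the general pattern is a probe hook that refuses the first interface, sketched here with assumed names (an illustration, not a quote of the driver):

static int example_jtag_probe(struct usb_serial *serial)
{
        struct usb_device *udev = serial->dev;

        /* leave channel A (interface 0) to the JTAG tools */
        if (serial->interface == udev->actconfig->interface[0])
                return -ENODEV;
        return 0;
}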
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 2e663f1..d953820 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -38,8 +38,6 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include <linux/version.h>
-
/* the mode to be set when the port ist opened */
static int initial_mode = 1;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95..9f9cd36 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -173,6 +173,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define KYOCERA_PRODUCT_KPC680 0x180a
#define ANYDATA_VENDOR_ID 0x16d5
+#define ANYDATA_PRODUCT_ADU_620UW 0x6202
#define ANYDATA_PRODUCT_ADU_E100A 0x6501
#define ANYDATA_PRODUCT_ADU_500A 0x6502
@@ -186,6 +187,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
+#define BANDRICH_PRODUCT_1004 0x1004
+#define BANDRICH_PRODUCT_1005 0x1005
+#define BANDRICH_PRODUCT_1006 0x1006
+#define BANDRICH_PRODUCT_1007 0x1007
+#define BANDRICH_PRODUCT_1008 0x1008
+#define BANDRICH_PRODUCT_1009 0x1009
+#define BANDRICH_PRODUCT_100A 0x100a
+
+#define BANDRICH_PRODUCT_100B 0x100b
+#define BANDRICH_PRODUCT_100C 0x100c
+#define BANDRICH_PRODUCT_100D 0x100d
+#define BANDRICH_PRODUCT_100E 0x100e
+
+#define BANDRICH_PRODUCT_100F 0x100f
+#define BANDRICH_PRODUCT_1010 0x1010
+#define BANDRICH_PRODUCT_1011 0x1011
+#define BANDRICH_PRODUCT_1012 0x1012
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_9508 0x0800
@@ -197,6 +215,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
+/* ZTE PRODUCTS */
+#define ZTE_VENDOR_ID 0x19d2
+#define ZTE_PRODUCT_MF628 0x0015
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -297,17 +319,34 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +385,7 @@ static struct usb_serial_driver option_1port_device = {
.read_int_callback = option_instat_callback,
};
-#ifdef CONFIG_USB_DEBUG
static int debug;
-#else
-#define debug 0
-#endif
/* per port private data */
@@ -954,8 +989,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446..1ede144 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
- { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbc..a3bd039 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
#define COREGA_VENDOR_ID 0x07aa
#define COREGA_PRODUCT_ID 0x002a
-/* HL HL-340 (ID: 4348:5523) */
-#define HL340_VENDOR_ID 0x4348
-#define HL340_PRODUCT_ID 0x5523
-
/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
#define YCCABLE_VENDOR_ID 0x05ad
#define YCCABLE_PRODUCT_ID 0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f152..7060337 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-#define DRIVER_VERSION "v.1.2.9c"
+#define DRIVER_VERSION "v.1.2.13a"
#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -31,6 +31,7 @@
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
#define SWIMS_USB_REQUEST_SetMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
#define SWIMS_SET_MODE_Modem 0x0001
/* per port private data */
@@ -40,18 +41,11 @@
static int debug;
static int nmea;
-static int truinstall = 1;
-
-enum devicetype {
- DEVICE_3_PORT = 0,
- DEVICE_1_PORT = 1,
- DEVICE_INSTALLER = 2,
-};
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
- dev_dbg(&udev->dev, "%s", "SET POWER STATE\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetPower, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
return result;
}
-static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
-{
- int result;
- dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
- result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- SWIMS_USB_REQUEST_SetMode, /* __u8 request */
- USB_TYPE_VENDOR, /* __u8 request type */
- eSWocMode, /* __u16 value */
- 0x0000, /* __u16 index */
- NULL, /* void *data */
- 0, /* __u16 size */
- USB_CTRL_SET_TIMEOUT); /* int timeout */
- return result;
-}
-
static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
{
int result;
- dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
{
int result;
int *num_ports = usb_get_serial_data(serial);
+ dev_dbg(&serial->dev->dev, "%s", __func__);
result = *num_ports;
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
static int sierra_calc_interface(struct usb_serial *serial)
{
- int interface;
- struct usb_interface *p_interface;
- struct usb_host_interface *p_host_interface;
+ int interface;
+ struct usb_interface *p_interface;
+ struct usb_host_interface *p_host_interface;
+ dev_dbg(&serial->dev->dev, "%s", __func__);
- /* Get the interface structure pointer from the serial struct */
- p_interface = serial->interface;
+ /* Get the interface structure pointer from the serial struct */
+ p_interface = serial->interface;
- /* Get a pointer to the host interface structure */
- p_host_interface = p_interface->cur_altsetting;
+ /* Get a pointer to the host interface structure */
+ p_host_interface = p_interface->cur_altsetting;
- /* read the interface descriptor for this active altsetting
- * to find out the interface number we are on
- */
- interface = p_host_interface->desc.bInterfaceNumber;
+ /* read the interface descriptor for this active altsetting
+ * to find out the interface number we are on
+ */
+ interface = p_host_interface->desc.bInterfaceNumber;
- return interface;
+ return interface;
}
static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial,
struct usb_device *udev;
int *num_ports;
u8 ifnum;
+ u8 numendpoints;
+
+ dev_dbg(&serial->dev->dev, "%s", __func__);
num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
if (!num_ports)
return -ENOMEM;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
udev = serial->dev;
- /* Figure out the interface number from the serial structure */
- ifnum = sierra_calc_interface(serial);
-
- /*
- * If this interface supports more than 1 alternate
- * select the 2nd one
- */
- if (serial->interface->num_altsetting == 2) {
- dev_dbg(&udev->dev,
- "Selecting alt setting for interface %d\n",
- ifnum);
+ /* Figure out the interface number from the serial structure */
+ ifnum = sierra_calc_interface(serial);
- /* We know the alternate setting is 1 for the MC8785 */
- usb_set_interface(udev, ifnum, 1);
- }
+ /*
+ * If this interface supports more than 1 alternate
+ * select the 2nd one
+ */
+ if (serial->interface->num_altsetting == 2) {
+ dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
+ ifnum);
+ /* We know the alternate setting is 1 for the MC8785 */
+ usb_set_interface(udev, ifnum, 1);
+ }
- /* Check if in installer mode */
- if (truinstall && id->driver_info == DEVICE_INSTALLER) {
- dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
- result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
- /* Don't bind to the device when in installer mode */
- kfree(num_ports);
- return -EIO;
- } else if (id->driver_info == DEVICE_1_PORT)
- *num_ports = 1;
- else if (ifnum == 0x99)
+ /* Dummy interface present on some SKUs should be ignored */
+ if (ifnum == 0x99)
*num_ports = 0;
+ else if (numendpoints <= 3)
+ *num_ports = 1;
else
- *num_ports = 3;
+ *num_ports = (numendpoints-1)/2;
+
/*
* save off our num_ports info so that we can use it in the
* calc_num_ports callback
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
+ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
{ USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */
+ /* Sierra Wireless C597 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
+ { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
- { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */
+ { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
{ USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
- { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/
- { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/
- { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/
+ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
+ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
+ { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
{ USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
{ USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
- { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
- { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
-
- { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */
- { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */
-
- { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
- { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
+ /* Sierra Wireless C885 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
+
+ { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
+ { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
- { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty,
if (portdata->rts_state)
val |= 0x02;
- /* Determine which port is targeted */
- if (port->bulk_out_endpointAddress == 2)
- interface = 0;
- else if (port->bulk_out_endpointAddress == 4)
- interface = 1;
- else if (port->bulk_out_endpointAddress == 5)
- interface = 2;
+ /* If composite device then properly report interface */
+ if (serial->num_ports == 1)
+ interface = sierra_calc_interface(serial);
+
+ /* Otherwise we need to do non-composite mapping */
+ else {
+ if (port->bulk_out_endpointAddress == 2)
+ interface = 0;
+ else if (port->bulk_out_endpointAddress == 4)
+ interface = 1;
+ else if (port->bulk_out_endpointAddress == 5)
+ interface = 2;
+ }
return usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial)
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
- .name = "sierra1",
+ .name = "sierra",
},
.description = "Sierra USB modem",
.id_table = id_table,
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-module_param(truinstall, bool, 0);
-MODULE_PARM_DESC(truinstall, "TRU-Install support");
-
-module_param(nmea, bool, 0);
+module_param(nmea, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nmea, "NMEA streaming");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
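The probe above now derives the port count from the interface descriptor instead of per-device driver_info: the pseudo-interface 0x99 is ignored, small interfaces get a single port, and otherwise each port is assumed to own one bulk IN/OUT pair next to a single interrupt endpoint. Restated as a standalone helper, for illustration (the function name is made up):

static int example_sierra_port_count(u8 ifnum, u8 numendpoints)
{
        if (ifnum == 0x99)              /* dummy interface on some SKUs */
                return 0;
        if (numendpoints <= 3)          /* interrupt + one bulk pair */
                return 1;
        return (numendpoints - 1) / 2;  /* e.g. 7 endpoints -> 3 ports */
}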
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531..b157c48 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial)
dbg("%s", __func__);
- if (serial == NULL)
- return;
-
for (i = 0; i < serial->num_ports; ++i)
serial_table[serial->minor + i] = NULL;
}
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref)
serial->type->shutdown(serial);
/* return the minor range that this device had */
- return_serial(serial);
+ if (serial->minor != SERIAL_TTY_NO_MINOR)
+ return_serial(serial);
for (i = 0; i < serial->num_ports; ++i)
serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
serial->interface = interface;
kref_init(&serial->kref);
mutex_init(&serial->disc_mutex);
+ serial->minor = SERIAL_TTY_NO_MINOR;
return serial;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d92496..c760346 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA
on the resulting scsi device node returns the Karma to normal
operation.
+config USB_STORAGE_SIERRA
+ bool "Sierra Wireless TRU-Install Feature Support"
+ depends on USB_STORAGE
+ help
+ Say Y here to include additional code to support Sierra Wireless
+ products with the TRU-Install feature (e.g., AC597E, AC881U).
+
+ This code switches the Sierra Wireless device from being in
+ Mass Storage mode to Modem mode. It also has the ability to
+ support host software upgrades should full Linux support be added
+ to TRU-Install.
+
config USB_STORAGE_CYPRESS_ATACB
bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c7..bc3415b 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
+usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 0000000..4359a2c
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/usb.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "protocol.h"
+#include "scsiglue.h"
+#include "sierra_ms.h"
+#include "debug.h"
+
+#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
+#define SWIMS_USB_INDEX_SetMode 0x0000
+#define SWIMS_SET_MODE_Modem 0x0001
+
+#define TRU_NORMAL 0x01
+#define TRU_FORCE_MS 0x02
+#define TRU_FORCE_MODEM 0x03
+
+static unsigned int swi_tru_install = 1;
+module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
+ " 2=Force CD-Rom, 3=Force Modem)");
+
+struct swoc_info {
+ __u8 rev;
+ __u8 reserved[8];
+ __u16 LinuxSKU;
+ __u16 LinuxVer;
+ __u8 reserved2[47];
+} __attribute__((__packed__));
+
+static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
+{
+ if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
+ (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
+ return true;
+ else
+ return false;
+}
+
+static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
+{
+ int result;
+ US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
+ result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
+ eSWocMode, /* __u16 value */
+ 0x0000, /* __u16 index */
+ NULL, /* void *data */
+ 0, /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+ return result;
+}
+
+
+static int sierra_get_swoc_info(struct usb_device *udev,
+ struct swoc_info *swocInfo)
+{
+ int result;
+
+ US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
+
+ result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
+ 0, /* __u16 value */
+ 0, /* __u16 index */
+ (void *) swocInfo, /* void *data */
+ sizeof(struct swoc_info), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
+ swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
+ return result;
+}
+
+static void debug_swoc(struct swoc_info *swocInfo)
+{
+ US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
+ US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
+ US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
+}
+
+
+static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct swoc_info *swocInfo;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int result;
+ if (swi_tru_install == TRU_FORCE_MS) {
+ result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+ } else {
+ swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: Allocation failure\n");
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -ENOMEM;
+ }
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: failed SWoC query\n");
+ kfree(swocInfo);
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -EIO;
+ }
+ debug_swoc(swocInfo);
+ result = snprintf(buf, PAGE_SIZE,
+ "REV=%02d SKU=%04X VER=%04X\n",
+ swocInfo->rev,
+ swocInfo->LinuxSKU,
+ swocInfo->LinuxVer);
+ kfree(swocInfo);
+ }
+ return result;
+}
+static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
+
+int sierra_ms_init(struct us_data *us)
+{
+ int result, retries;
+ signed long delay_t;
+ struct swoc_info *swocInfo;
+ struct usb_device *udev;
+ struct Scsi_Host *sh;
+ struct scsi_device *sd;
+
+ delay_t = 2;
+ retries = 3;
+ result = 0;
+ udev = us->pusb_dev;
+
+ sh = us_to_host(us);
+ sd = scsi_get_host_dev(sh);
+
+ US_DEBUGP("SWIMS: sierra_ms_init called\n");
+
+ /* Force Modem mode */
+ if (swi_tru_install == TRU_FORCE_MODEM) {
+ US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
+ result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
+ return -EIO;
+ }
+ /* Force Mass Storage mode (keep CD-Rom) */
+ else if (swi_tru_install == TRU_FORCE_MS) {
+ US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
+ goto complete;
+ }
+ /* Normal TRU-Install Logic */
+ else {
+ US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
+
+ swocInfo = kmalloc(sizeof(struct swoc_info),
+ GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: %s", "Allocation failure\n");
+ return -ENOMEM;
+ }
+
+ retries = 3;
+ do {
+ retries--;
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
+ schedule_timeout_uninterruptible(2*HZ);
+ }
+ } while (retries && result < 0);
+
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s",
+ "Completely failed SWoC query\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+
+ debug_swoc(swocInfo);
+
+ /* If there is no Linux software on the TRU-Install device
+ * then switch to modem mode
+ */
+ if (!containsFullLinuxPackage(swocInfo)) {
+ US_DEBUGP("SWIMS: %s",
+ "Switching to Modem Mode\n");
+ result = sierra_set_ms_mode(udev,
+ SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch modem\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+ kfree(swocInfo);
+ }
+complete:
+ result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 0000000..bb48634
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
+#ifndef _SIERRA_MS_H_
+#define _SIERRA_MS_H_
+extern int sierra_ms_init(struct us_data *us);
+#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb..3523a0b 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us */
- if (residue) {
- if (!(us->fflags & US_FL_IGNORE_RESIDUE)) {
+ if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
+
+ /* Heuristically detect devices that generate bogus residues
+ * by seeing what happens with INQUIRY and READ CAPACITY
+ * commands.
+ */
+ if (bcs->Status == US_BULK_STAT_OK &&
+ scsi_get_resid(srb) == 0 &&
+ ((srb->cmnd[0] == INQUIRY &&
+ transfer_length == 36) ||
+ (srb->cmnd[0] == READ_CAPACITY &&
+ transfer_length == 8))) {
+ us->fflags |= US_FL_IGNORE_RESIDUE;
+
+ } else {
residue = min(residue, transfer_length);
scsi_set_resid(srb, max(scsi_get_resid(srb),
(int) residue));
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f5..ba412e6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Cedric Godin <cedric@belbone.be> */
+UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
+ "Nokia",
+ "5300",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
"SMSC",
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
US_FL_FIX_CAPACITY),
/* Reported by Emil Larsson <emil@swip.net> */
-UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
"NIKON",
"NIKON DSC D80",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
/* Reported by Ortwin Glueck <odi@odi.ch> */
-UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
"NIKON",
"NIKON DSC D40",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
+UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
+ "Simple Tech/Datafab",
+ "CF+SM Reader",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
* to the USB storage specification in two ways:
* - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0),
+#ifdef CONFIG_USB_STORAGE_SIERRA
/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
"Sierra Wireless",
"USB MMC Storage",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_DEVICE),
+ US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
+ 0),
+#endif
/* Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
US_FL_FIX_CAPACITY),
/*
+ * Patch by Jost Diederichs <jost@qdusa.com>
+ */
+UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
+ "Motorola Inc.",
+ "Motorola Phone (RAZRV3xx)",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
+/*
* Patch by Constantin Baranov <const@tltsu.ru>
* Report by Andreas Koenecke.
* Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
+ "iRiver",
+ "MP3 T10",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/*
* David Härdeman <david@2gen.com>
* The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851..73679aa 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
#include "cypress_atacb.h"
#endif
+#ifdef CONFIG_USB_STORAGE_SIERRA
+#include "sierra_ms.h"
+#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 4bd569e..314d186 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -11,7 +11,6 @@
* Code is based on s3fb
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index e7018a2..9c59259 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -39,7 +39,9 @@
#endif
#if defined(CONFIG_ARCH_AT91)
-#define ATMEL_LCDFB_FBINFO_DEFAULT FBINFO_DEFAULT
+#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
+ | FBINFO_PARTIAL_PAN_OK \
+ | FBINFO_HWACCEL_YPAN)
static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
struct fb_var_screeninfo *var)
@@ -177,7 +179,7 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 0,
- .ypanstep = 0,
+ .ypanstep = 1,
.ywrapstep = 0,
.accel = FB_ACCEL_NONE,
};
@@ -240,9 +242,11 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
{
struct fb_info *info = sinfo->info;
struct fb_var_screeninfo *var = &info->var;
+ unsigned int smem_len;
- info->fix.smem_len = (var->xres_virtual * var->yres_virtual
- * ((var->bits_per_pixel + 7) / 8));
+ smem_len = (var->xres_virtual * var->yres_virtual
+ * ((var->bits_per_pixel + 7) / 8));
+ info->fix.smem_len = max(smem_len, sinfo->smem_len);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
@@ -794,6 +798,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->default_monspecs = pdata_sinfo->default_monspecs;
sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
sinfo->guard_time = pdata_sinfo->guard_time;
+ sinfo->smem_len = pdata_sinfo->smem_len;
sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
} else {
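With ypanstep set to 1 and FBINFO_HWACCEL_YPAN advertised, user space can double-buffer by panning the virtual y offset, provided the platform data reserves enough smem_len for two screens. A hedged user-space sketch of what that enables (error handling omitted; the helper name is illustrative):

#include <linux/fb.h>
#include <sys/ioctl.h>

/* flip between two stacked buffers; assumes yres_virtual >= 2 * yres */
static int example_flip(int fd, struct fb_var_screeninfo *var)
{
        var->yoffset = var->yoffset ? 0 : var->yres;
        return ioctl(fd, FBIOPAN_DISPLAY, var);
}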
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index 4d13f68..aa95f83 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
OUTREG(DP_WRITE_MSK, 0xffffffff);
OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
+ radeon_fifo_wait(2);
+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
+
radeon_fifo_wait(2);
OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
@@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
| (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
+ radeon_fifo_wait(2);
+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
+
radeon_fifo_wait(3);
OUTREG(SRC_Y_X, (sy << 16) | sx);
OUTREG(DST_Y_X, (dy << 16) | dx);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 940467a..7644ed2 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -58,7 +58,7 @@
#include <asm/gpio.h>
#include <asm/portmux.h>
-#include <asm/mach/bf54x-lq043.h>
+#include <mach/bf54x-lq043.h>
#define NO_BL_SUPPORT
@@ -733,7 +733,6 @@ static int bfin_bf54x_remove(struct platform_device *pdev)
static int bfin_bf54x_suspend(struct platform_device *pdev, pm_message_t state)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
- struct bfin_bf54xfb_info *info = fbinfo->par;
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN);
disable_dma(CH_EPPI0);
@@ -747,8 +746,18 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct bfin_bf54xfb_info *info = fbinfo->par;
- enable_dma(CH_EPPI0);
- bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN);
+ if (info->lq043_open_cnt) {
+
+ bfin_write_EPPI0_CONTROL(0);
+ SSYNC();
+
+ config_dma(info);
+ config_ppi(info);
+
+ /* start dma */
+ enable_dma(CH_EPPI0);
+ bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN);
+ }
return 0;
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3385993..c6299e8 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2518,7 +2518,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
c = vc->vc_video_erase_char;
vc->vc_video_erase_char =
((c & 0xfe00) >> 1) | (c & 0xff);
- c = vc->vc_def_color;
+ c = vc->vc_scrl_erase_char;
vc->vc_scrl_erase_char =
((c & 0xFE00) >> 1) | (c & 0xFF);
vc->vc_attr >>= 1;
@@ -2551,7 +2551,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
if (vc->vc_can_do_color) {
vc->vc_video_erase_char =
((c & 0xff00) << 1) | (c & 0xff);
- c = vc->vc_def_color;
+ c = vc->vc_scrl_erase_char;
vc->vc_scrl_erase_char =
((c & 0xFF00) << 1) | (c & 0xFF);
vc->vc_attr <<= 1;
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 59df132..4835bdc 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -114,6 +114,17 @@ static struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+ if (!PageDirty(page))
+ SetPageDirty(page);
+ return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+ .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -163,6 +174,14 @@ void fb_deferred_io_init(struct fb_info *info)
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file)
+{
+ file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
void fb_deferred_io_cleanup(struct fb_info *info)
{
void *screen_base = (void __force *) info->screen_base;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 6b48780..98843c2 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1344,6 +1344,10 @@ fb_open(struct inode *inode, struct file *file)
if (res)
module_put(info->fbops->owner);
}
+#ifdef CONFIG_FB_DEFERRED_IO
+ if (info->fbdefio)
+ fb_deferred_io_open(info, inode, file);
+#endif
out:
unlock_kernel();
return res;
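fb_deferred_io_open() is called from fb_open() so that pages of a deferred-I/O framebuffer get the set_page_dirty() helper added in fb_defio.c instead of the buffer-head default. A driver opts into this machinery roughly as follows (a minimal sketch: the callback body and delay value are assumptions, only the structure fields and fb_deferred_io_init() come from the fb_defio API):

static void example_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
        /* walk the dirty pages on pagelist and push them to the device */
}

static struct fb_deferred_io example_defio = {
        .delay          = HZ / 10,      /* coalesce writes for ~100 ms */
        .deferred_io    = example_deferred_io,
};

        /* in the driver's probe(), before register_framebuffer(): */
        info->fbdefio = &example_defio;
        fb_deferred_io_init(info);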
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bd320a2..fb51197 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -479,6 +479,10 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+ if (mfbi->x_aoi_d < 0)
+ mfbi->x_aoi_d = 0;
+ if (mfbi->y_aoi_d < 0)
+ mfbi->y_aoi_d = 0;
switch (index) {
case 0:
if (mfbi->x_aoi_d != 0)
@@ -778,6 +782,22 @@ static void unmap_video_memory(struct fb_info *info)
}
/*
+ * Using the fb_var_screeninfo in fb_info we set the aoi of this
+ * particular framebuffer. It is a light version of fsl_diu_set_par.
+ */
+static int fsl_diu_set_aoi(struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct mfb_info *mfbi = info->par;
+ struct diu_ad *ad = mfbi->ad;
+
+ /* AOI should not be greater than display size */
+ ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
+ ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
+ return 0;
+}
+
+/*
* Using the fb_var_screeninfo in fb_info we set the resolution of this
* particular framebuffer. This function alters the fb_fix_screeninfo stored
* in fb_info. It does not alter var in fb_info since we are using that
@@ -817,11 +837,11 @@ static int fsl_diu_set_par(struct fb_info *info)
diu_ops.get_pixel_format(var->bits_per_pixel,
machine_data->monitor_port);
ad->addr = cpu_to_le32(info->fix.smem_start);
- ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) |
- var->xres) | mfbi->g_alpha;
- /* fix me. AOI should not be greater than display size */
+ ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
+ var->xres_virtual) | mfbi->g_alpha;
+ /* AOI should not be greater than display size */
ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres);
- ad->offset_xyi = 0;
+ ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
/* Disable chroma keying function */
@@ -921,6 +941,8 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
else
info->var.vmode &= ~FB_VMODE_YWRAP;
+ fsl_diu_set_aoi(info);
+
return 0;
}
@@ -989,7 +1011,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
pr_debug("set AOI display offset of index %d to (%d,%d)\n",
mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
fsl_diu_check_var(&info->var, info);
- fsl_diu_set_par(info);
+ fsl_diu_set_aoi(info);
break;
case MFB_GET_AOID:
aoi_d.x_aoi_d = mfbi->x_aoi_d;
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 75ee5a1..c14e3e2 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -87,13 +87,7 @@ static int matroxfb_gpio_getscl(void* data) {
return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0;
}
-static struct i2c_adapter matrox_i2c_adapter_template =
-{
- .owner = THIS_MODULE,
- .id = I2C_HW_B_G400,
-};
-
-static struct i2c_algo_bit_data matrox_i2c_algo_template =
+static const struct i2c_algo_bit_data matrox_i2c_algo_template =
{
.setsda = matroxfb_gpio_setsda,
.setscl = matroxfb_gpio_setscl,
@@ -112,7 +106,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
b->minfo = minfo;
b->mask.data = data;
b->mask.clock = clock;
- b->adapter = matrox_i2c_adapter_template;
+ b->adapter.owner = THIS_MODULE;
snprintf(b->adapter.name, sizeof(b->adapter.name), name,
minfo->fbcon.node);
i2c_set_adapdata(&b->adapter, b);
@@ -187,6 +181,17 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0);
if (err)
printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n");
+ else {
+ struct i2c_board_info maven_info = {
+ I2C_BOARD_INFO("maven", 0x1b),
+ };
+ unsigned short const addr_list[2] = {
+ 0x1b, I2C_CLIENT_END
+ };
+
+ i2c_new_probed_device(&m2info->maven.adapter,
+ &maven_info, addr_list);
+ }
}
return m2info;
fail_ddc1:;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 89da27b..042408a 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -19,8 +19,6 @@
#include <linux/matroxfb.h>
#include <asm/div64.h>
-#define MAVEN_I2CID (0x1B)
-
#define MGATVO_B 1
#define MGATVO_C 2
@@ -128,7 +126,7 @@ static int get_ctrl_id(__u32 v4l2_id) {
struct maven_data {
struct matrox_fb_info* primary_head;
- struct i2c_client client;
+ struct i2c_client *client;
int version;
};
@@ -974,7 +972,7 @@ static inline int maven_compute_timming(struct maven_data* md,
static int maven_program_timming(struct maven_data* md,
const struct mavenregs* m) {
- struct i2c_client* c = &md->client;
+ struct i2c_client *c = md->client;
if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) {
LR(0x80);
@@ -1011,7 +1009,7 @@ static int maven_program_timming(struct maven_data* md,
}
static inline int maven_resync(struct maven_data* md) {
- struct i2c_client* c = &md->client;
+ struct i2c_client *c = md->client;
maven_set_reg(c, 0x95, 0x20); /* start whole thing */
return 0;
}
@@ -1069,48 +1067,48 @@ static int maven_set_control (struct maven_data* md,
maven_compute_bwlevel(md, &blacklevel, &whitelevel);
blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8);
whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8);
- maven_set_reg_pair(&md->client, 0x0e, blacklevel);
- maven_set_reg_pair(&md->client, 0x1e, whitelevel);
+ maven_set_reg_pair(md->client, 0x0e, blacklevel);
+ maven_set_reg_pair(md->client, 0x1e, whitelevel);
}
break;
case V4L2_CID_SATURATION:
{
- maven_set_reg(&md->client, 0x20, p->value);
- maven_set_reg(&md->client, 0x22, p->value);
+ maven_set_reg(md->client, 0x20, p->value);
+ maven_set_reg(md->client, 0x22, p->value);
}
break;
case V4L2_CID_HUE:
{
- maven_set_reg(&md->client, 0x25, p->value);
+ maven_set_reg(md->client, 0x25, p->value);
}
break;
case V4L2_CID_GAMMA:
{
const struct maven_gamma* g;
g = maven_compute_gamma(md);
- maven_set_reg(&md->client, 0x83, g->reg83);
- maven_set_reg(&md->client, 0x84, g->reg84);
- maven_set_reg(&md->client, 0x85, g->reg85);
- maven_set_reg(&md->client, 0x86, g->reg86);
- maven_set_reg(&md->client, 0x87, g->reg87);
- maven_set_reg(&md->client, 0x88, g->reg88);
- maven_set_reg(&md->client, 0x89, g->reg89);
- maven_set_reg(&md->client, 0x8a, g->reg8a);
- maven_set_reg(&md->client, 0x8b, g->reg8b);
+ maven_set_reg(md->client, 0x83, g->reg83);
+ maven_set_reg(md->client, 0x84, g->reg84);
+ maven_set_reg(md->client, 0x85, g->reg85);
+ maven_set_reg(md->client, 0x86, g->reg86);
+ maven_set_reg(md->client, 0x87, g->reg87);
+ maven_set_reg(md->client, 0x88, g->reg88);
+ maven_set_reg(md->client, 0x89, g->reg89);
+ maven_set_reg(md->client, 0x8a, g->reg8a);
+ maven_set_reg(md->client, 0x8b, g->reg8b);
}
break;
case MATROXFB_CID_TESTOUT:
{
unsigned char val
- = maven_get_reg(&md->client,0x8d);
+ = maven_get_reg(md->client, 0x8d);
if (p->value) val |= 0x10;
else val &= ~0x10;
- maven_set_reg(&md->client, 0x8d, val);
+ maven_set_reg(md->client, 0x8d, val);
}
break;
case MATROXFB_CID_DEFLICKER:
{
- maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md));
+ maven_set_reg(md->client, 0x93, maven_compute_deflicker(md));
}
break;
}
@@ -1189,6 +1187,7 @@ static int maven_init_client(struct i2c_client* clnt) {
MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo);
md->primary_head = MINFO;
+ md->client = clnt;
down_write(&ACCESS_FBINFO(altout.lock));
ACCESS_FBINFO(outputs[1]).output = &maven_altout;
ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
@@ -1232,14 +1231,11 @@ static int maven_shutdown_client(struct i2c_client* clnt) {
return 0;
}
-static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD;
-
-static struct i2c_driver maven_driver;
-
-static int maven_detect_client(struct i2c_adapter* adapter, int address, int kind) {
- int err = 0;
- struct i2c_client* new_client;
+static int maven_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err = -ENODEV;
struct maven_data* data;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
@@ -1250,50 +1246,37 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
err = -ENOMEM;
goto ERROR0;
}
- new_client = &data->client;
- i2c_set_clientdata(new_client, data);
- new_client->addr = address;
- new_client->adapter = adapter;
- new_client->driver = &maven_driver;
- new_client->flags = 0;
- strlcpy(new_client->name, "maven", I2C_NAME_SIZE);
- if ((err = i2c_attach_client(new_client)))
- goto ERROR3;
- err = maven_init_client(new_client);
+ i2c_set_clientdata(client, data);
+ err = maven_init_client(client);
if (err)
goto ERROR4;
return 0;
ERROR4:;
- i2c_detach_client(new_client);
-ERROR3:;
- kfree(new_client);
+ kfree(data);
ERROR0:;
return err;
}
-static int maven_attach_adapter(struct i2c_adapter* adapter) {
- if (adapter->id == I2C_HW_B_G400)
- return i2c_probe(adapter, &addr_data, &maven_detect_client);
- return 0;
-}
-
-static int maven_detach_client(struct i2c_client* client) {
- int err;
-
- if ((err = i2c_detach_client(client)))
- return err;
+static int maven_remove(struct i2c_client *client)
+{
maven_shutdown_client(client);
kfree(i2c_get_clientdata(client));
return 0;
}
+static const struct i2c_device_id maven_id[] = {
+ { "maven", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, maven_id);
+
static struct i2c_driver maven_driver={
.driver = {
.name = "maven",
},
- .id = I2C_DRIVERID_MGATVO,
- .attach_adapter = maven_attach_adapter,
- .detach_client = maven_detach_client,
+ .probe = maven_probe,
+ .remove = maven_remove,
+ .id_table = maven_id,
};
static int __init matroxfb_maven_init(void)
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 3f1ca2a..c6dd924 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1746,6 +1746,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
release_mem_region(fix->mmio_start, fix->mmio_len);
pci_set_drvdata(pdev, NULL);
+ fb_dealloc_cmap(&info->cmap);
kfree(info->pixmap.addr);
kfree(info);
}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index e7aa7ae..9720449 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1031,7 +1031,9 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
- pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
+
+ if ((lccr0 & LCCR0_PAS) == 0)
+ pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
}
static void pxafb_enable_controller(struct pxafb_info *fbi)
@@ -1400,6 +1402,8 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
if (lcd_conn == LCD_MONO_STN_8BPP)
fbi->lccr0 |= LCCR0_DPD;
+ fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0;
+
fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff);
fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
@@ -1673,53 +1677,63 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
#define pxafb_setup_options() (0)
#endif
-static int __devinit pxafb_probe(struct platform_device *dev)
-{
- struct pxafb_info *fbi;
- struct pxafb_mach_info *inf;
- struct resource *r;
- int irq, ret;
-
- dev_dbg(&dev->dev, "pxafb_probe\n");
-
- inf = dev->dev.platform_data;
- ret = -ENOMEM;
- fbi = NULL;
- if (!inf)
- goto failed;
-
- ret = pxafb_parse_options(&dev->dev, g_options);
- if (ret < 0)
- goto failed;
-
#ifdef DEBUG_VAR
- /* Check for various illegal bit-combinations. Currently only
- * a warning is given. */
+/* Check for various illegal bit-combinations. Currently only
+ * a warning is given. */
+static void __devinit pxafb_check_options(struct device *dev,
+ struct pxafb_mach_info *inf)
+{
+ if (inf->lcd_conn)
+ return;
if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
- dev_warn(&dev->dev, "machine LCCR0 setting contains "
+ dev_warn(dev, "machine LCCR0 setting contains "
"illegal bits: %08x\n",
inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
- dev_warn(&dev->dev, "machine LCCR3 setting contains "
+ dev_warn(dev, "machine LCCR3 setting contains "
"illegal bits: %08x\n",
inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
if (inf->lccr0 & LCCR0_DPD &&
((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas ||
(inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl ||
(inf->lccr0 & LCCR0_CMS) != LCCR0_Mono))
- dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is "
+ dev_warn(dev, "Double Pixel Data (DPD) mode is "
"only valid in passive mono"
" single panel mode\n");
if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
(inf->lccr0 & LCCR0_SDS) == LCCR0_Dual)
- dev_warn(&dev->dev, "Dual panel only valid in passive mode\n");
+ dev_warn(dev, "Dual panel only valid in passive mode\n");
if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
(inf->modes->upper_margin || inf->modes->lower_margin))
- dev_warn(&dev->dev, "Upper and lower margins must be 0 in "
+ dev_warn(dev, "Upper and lower margins must be 0 in "
"passive mode\n");
+}
+#else
+#define pxafb_check_options(...) do {} while (0)
#endif
+static int __devinit pxafb_probe(struct platform_device *dev)
+{
+ struct pxafb_info *fbi;
+ struct pxafb_mach_info *inf;
+ struct resource *r;
+ int irq, ret;
+
+ dev_dbg(&dev->dev, "pxafb_probe\n");
+
+ inf = dev->dev.platform_data;
+ ret = -ENOMEM;
+ fbi = NULL;
+ if (!inf)
+ goto failed;
+
+ ret = pxafb_parse_options(&dev->dev, g_options);
+ if (ret < 0)
+ goto failed;
+
+ pxafb_check_options(&dev->dev, inf);
+
dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",
inf->modes->xres,
inf->modes->yres,
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 8361bd0..4dcec48 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -11,7 +11,6 @@
* which is based on the code of neofb.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f6ef6cc..4c32c06 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -595,6 +595,8 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
info->fbops = &sh_mobile_lcdc_ops;
info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres;
+ info->var.width = cfg->lcd_size_cfg.width;
+ info->var.height = cfg->lcd_size_cfg.height;
info->var.activate = FB_ACTIVATE_NOW;
error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp);
if (error)
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/vermilion/vermilion.h
index c4aba59..7491abf 100644
--- a/drivers/video/vermilion/vermilion.h
+++ b/drivers/video/vermilion/vermilion.h
@@ -30,7 +30,6 @@
#define _VERMILION_H_
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/pci.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 34aae7a..3df17dc 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -12,7 +12,6 @@
* (http://davesdomain.org.uk/viafb/)
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 7b3a842..5da3d24 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -24,7 +24,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfef604..62eab43 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -158,7 +158,7 @@ static inline s64 towards_target(struct virtio_balloon *vb)
vb->vdev->config->get(vb->vdev,
offsetof(struct virtio_balloon_config, num_pages),
&v, sizeof(v));
- return v - vb->num_pages;
+ return (s64)v - vb->num_pages;
}
static void update_balloon_size(struct virtio_balloon *vb)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32b9fe1..c510367 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -285,10 +285,11 @@ config ALIM1535_WDT
config ALIM7101_WDT
tristate "ALi M7101 PMU Computer Watchdog"
- depends on X86 && PCI
+ depends on PCI
help
This is the driver for the hardware watchdog on the ALi M7101 PMU
- as used in the x86 Cobalt servers.
+ as used in the x86 Cobalt servers and also found in some
+ SPARC Netra servers.
To compile this driver as a module, choose M here: the
module will be called alim7101_wdt.
@@ -464,6 +465,16 @@ config PC87413_WDT
Most people will say N.
+config RDC321X_WDT
+ tristate "RDC R-321x SoC watchdog"
+ depends on X86_RDC321X
+ help
+ This is the driver for the built-in hardware watchdog
+ in the RDC R-321x SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rdc321x_wdt.
+
config 60XX_WDT
tristate "SBC-60XX Watchdog Timer"
depends on X86
@@ -632,6 +643,16 @@ config SBC_EPX_C3_WATCHDOG
# MIPS Architecture
+config RC32434_WDT
+ tristate "IDT RC32434 SoC Watchdog Timer"
+ depends on MIKROTIK_RB532
+ help
+ Hardware driver for the IDT RC32434 SoC built-in
+ watchdog timer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rc32434_wdt.
+
config INDYDOG
tristate "Indy/I2 Hardware Watchdog"
depends on SGI_HAS_INDYDOG
@@ -691,10 +712,6 @@ config MPC5200_WDT
tristate "MPC5200 Watchdog Timer"
depends on PPC_MPC52xx
-config 8xx_WDT
- tristate "MPC8xx Watchdog Timer"
- depends on 8xx
-
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"
depends on PPC_8xx || PPC_83xx || PPC_86xx
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ca3dc04..e0ef123 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
+obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o
obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o
@@ -94,6 +95,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
# M68KNOMMU Architecture
# MIPS Architecture
+obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
@@ -104,7 +106,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
# PARISC Architecture
# POWERPC Architecture
-obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index d061f0a..993e5f5 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -241,7 +241,7 @@ static int at91wdt_resume(struct platform_device *pdev)
{
if (at91wdt_busy)
at91_wdt_start();
- return 0;
+ return 0;
}
#else
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index d039d5f..a3765e0b 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -116,6 +116,7 @@ static unsigned int reload; /* the computed soft_margin */
static int nowayout = WATCHDOG_NOWAYOUT;
static char expect_release;
static unsigned long hpwdt_is_open;
+static unsigned int allow_kdump;
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
@@ -221,19 +222,19 @@ static int __devinit cru_detect(unsigned long map_entry,
if (cmn_regs.u1.ral != 0) {
printk(KERN_WARNING
- "hpwdt: Call succeeded but with an error: 0x%x\n",
- cmn_regs.u1.ral);
+ "hpwdt: Call succeeded but with an error: 0x%x\n",
+ cmn_regs.u1.ral);
} else {
physical_bios_base = cmn_regs.u2.rebx;
physical_bios_offset = cmn_regs.u4.redx;
cru_length = cmn_regs.u3.recx;
cru_physical_address =
- physical_bios_base + physical_bios_offset;
+ physical_bios_base + physical_bios_offset;
/* If the values look OK, then map it in. */
if ((physical_bios_base + physical_bios_offset)) {
cru_rom_addr =
- ioremap(cru_physical_address, cru_length);
+ ioremap(cru_physical_address, cru_length);
if (cru_rom_addr)
retval = 0;
}
@@ -356,7 +357,6 @@ asm(".text \n\t"
"call *%r12 \n\t"
"pushfq \n\t"
"popq %r12 \n\t"
- "popfq \n\t"
"movl %eax, (%r9) \n\t"
"movl %ebx, 4(%r9) \n\t"
"movl %ecx, 8(%r9) \n\t"
@@ -390,10 +390,10 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm)
smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
cru_physical_address =
- smbios_cru64_ptr->physical_address +
- smbios_cru64_ptr->double_offset;
+ smbios_cru64_ptr->physical_address +
+ smbios_cru64_ptr->double_offset;
cru_rom_addr = ioremap(cru_physical_address,
- smbios_cru64_ptr->double_length);
+ smbios_cru64_ptr->double_length);
}
}
}
@@ -405,7 +405,7 @@ static int __devinit detect_cru_service(void)
dmi_walk(dmi_find_cru);
/* if cru_rom_addr has been set then we found a CRU service */
- return ((cru_rom_addr != NULL) ? 0: -ENODEV);
+ return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
}
/* ------------------------------------------------------------------------- */
@@ -413,34 +413,6 @@ static int __devinit detect_cru_service(void)
#endif
/*
- * NMI Handler
- */
-static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
- void *data)
-{
- unsigned long rom_pl;
- static int die_nmi_called;
-
- if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
- return NOTIFY_OK;
-
- spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called)
- asminline_call(&cmn_regs, cru_rom_addr);
- die_nmi_called = 1;
- spin_unlock_irqrestore(&rom_lock, rom_pl);
- if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
- "but unable to determine source.\n");
- } else {
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
- }
-
- return NOTIFY_STOP;
-}
-
-/*
* Watchdog operations
*/
static void hpwdt_start(void)
@@ -484,6 +456,36 @@ static int hpwdt_change_timer(int new_margin)
}
/*
+ * NMI Handler
+ */
+static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
+ void *data)
+{
+ unsigned long rom_pl;
+ static int die_nmi_called;
+
+ if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+ return NOTIFY_OK;
+
+ spin_lock_irqsave(&rom_lock, rom_pl);
+ if (!die_nmi_called)
+ asminline_call(&cmn_regs, cru_rom_addr);
+ die_nmi_called = 1;
+ spin_unlock_irqrestore(&rom_lock, rom_pl);
+ if (cmn_regs.u1.ral == 0) {
+ printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ "but unable to determine source.\n");
+ } else {
+ if (allow_kdump)
+ hpwdt_stop();
+ panic("An NMI occurred, please see the Integrated "
+ "Management Log for details.\n");
+ }
+
+ return NOTIFY_STOP;
+}
+
+/*
* /dev/watchdog handling
*/
static int hpwdt_open(struct inode *inode, struct file *file)
@@ -625,17 +627,18 @@ static struct notifier_block die_notifier = {
*/
static int __devinit hpwdt_init_one(struct pci_dev *dev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int retval;
/*
* First let's find out if we are on an iLO2 server. We will
* not run on a legacy ASM box.
+ * So we only support the G5 ProLiant servers and higher.
*/
if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
dev_warn(&dev->dev,
- "This server does not have an iLO2 ASIC.\n");
+ "This server does not have an iLO2 ASIC.\n");
return -ENODEV;
}
@@ -669,7 +672,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
retval = detect_cru_service();
if (retval < 0) {
dev_warn(&dev->dev,
- "Unable to detect the %d Bit CRU Service.\n",
+ "Unable to detect the %d Bit CRU Service.\n",
HPWDT_ARCH);
goto error_get_cru;
}
@@ -684,7 +687,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
retval = register_die_notifier(&die_notifier);
if (retval != 0) {
dev_warn(&dev->dev,
- "Unable to register a die notifier (err=%d).\n",
+ "Unable to register a die notifier (err=%d).\n",
retval);
goto error_die_notifier;
}
@@ -699,8 +702,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
printk(KERN_INFO
"hp Watchdog Timer Driver: 1.00"
- ", timer margin: %d seconds( nowayout=%d).\n",
- soft_margin, nowayout);
+ ", timer margin: %d seconds (nowayout=%d)"
+ ", allow kernel dump: %s (default = 0/OFF).\n",
+ soft_margin, nowayout, (allow_kdump == 0) ? "OFF" : "ON");
return 0;
@@ -755,6 +759,9 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
+module_param(allow_kdump, int, 0);
+MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
+
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
diff --git a/drivers/watchdog/mpc8xx_wdt.c b/drivers/watchdog/mpc8xx_wdt.c
deleted file mode 100644
index 1336425..0000000
--- a/drivers/watchdog/mpc8xx_wdt.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * mpc8xx_wdt.c - MPC8xx watchdog userspace interface
- *
- * Author: Florian Schirmer <jolt@tuxbox.org>
- *
- * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/watchdog.h>
-#include <asm/8xx_immap.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <syslib/m8xx_wdt.h>
-
-static unsigned long wdt_opened;
-static int wdt_status;
-static spinlock_t wdt_lock;
-
-static void mpc8xx_wdt_handler_disable(void)
-{
- volatile uint __iomem *piscr;
- piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr;
-
- if (!m8xx_has_internal_rtc)
- m8xx_wdt_stop_timer();
- else
- out_be32(piscr, in_be32(piscr) & ~(PISCR_PIE | PISCR_PTE));
- printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler deactivated\n");
-}
-
-static void mpc8xx_wdt_handler_enable(void)
-{
- volatile uint __iomem *piscr;
- piscr = (uint *)&((immap_t *)IMAP_ADDR)->im_sit.sit_piscr;
-
- if (!m8xx_has_internal_rtc)
- m8xx_wdt_install_timer();
- else
- out_be32(piscr, in_be32(piscr) | PISCR_PIE | PISCR_PTE);
- printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler activated\n");
-}
-
-static int mpc8xx_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &wdt_opened))
- return -EBUSY;
- m8xx_wdt_reset();
- mpc8xx_wdt_handler_disable();
- return nonseekable_open(inode, file);
-}
-
-static int mpc8xx_wdt_release(struct inode *inode, struct file *file)
-{
- m8xx_wdt_reset();
-#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
- mpc8xx_wdt_handler_enable();
-#endif
- clear_bit(0, &wdt_opened);
- return 0;
-}
-
-static ssize_t mpc8xx_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- spin_lock(&wdt_lock);
- m8xx_wdt_reset();
- spin_unlock(&wdt_lock);
- }
- return len;
-}
-
-static long mpc8xx_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int timeout;
- static struct watchdog_info info = {
- .options = WDIOF_KEEPALIVEPING,
- .firmware_version = 0,
- .identity = "MPC8xx watchdog",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user((void *)arg, &info, sizeof(info)))
- return -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- if (put_user(wdt_status, (int *)arg))
- return -EFAULT;
- wdt_status &= ~WDIOF_KEEPALIVEPING;
- break;
-
- case WDIOC_GETTEMP:
- return -EOPNOTSUPP;
-
- case WDIOC_SETOPTIONS:
- return -EOPNOTSUPP;
-
- case WDIOC_KEEPALIVE:
- spin_lock(&wdt_lock);
- m8xx_wdt_reset();
- wdt_status |= WDIOF_KEEPALIVEPING;
- spin_unlock(&wdt_lock);
- break;
-
- case WDIOC_SETTIMEOUT:
- return -EOPNOTSUPP;
-
- case WDIOC_GETTIMEOUT:
- spin_lock(&wdt_lock);
- timeout = m8xx_wdt_get_timeout();
- spin_unlock(&wdt_lock);
- if (put_user(timeout, (int *)arg))
- return -EFAULT;
- break;
-
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
-static const struct file_operations mpc8xx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = mpc8xx_wdt_write,
- .unlocked_ioctl = mpc8xx_wdt_ioctl,
- .open = mpc8xx_wdt_open,
- .release = mpc8xx_wdt_release,
-};
-
-static struct miscdevice mpc8xx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mpc8xx_wdt_fops,
-};
-
-static int __init mpc8xx_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return misc_register(&mpc8xx_wdt_miscdev);
-}
-
-static void __exit mpc8xx_wdt_exit(void)
-{
- misc_deregister(&mpc8xx_wdt_miscdev);
-
- m8xx_wdt_reset();
- mpc8xx_wdt_handler_enable();
-}
-
-module_init(mpc8xx_wdt_init);
-module_exit(mpc8xx_wdt_exit);
-
-MODULE_AUTHOR("Florian Schirmer <jolt@tuxbox.org>");
-MODULE_DESCRIPTION("MPC8xx watchdog driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index f209496..38c588e 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -48,6 +48,7 @@ struct mpc8xxx_wdt_type {
};
static struct mpc8xxx_wdt __iomem *wd_base;
+static int mpc8xxx_wdt_init_late(void);
static u16 timeout = 0xffff;
module_param(timeout, ushort, 0);
@@ -213,6 +214,12 @@ static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev,
else
timeout_sec = timeout / freq;
+#ifdef MODULE
+ ret = mpc8xxx_wdt_init_late();
+ if (ret)
+ goto err_unmap;
+#endif
+
pr_info("WDT driver for MPC8xxx initialized. mode:%s timeout=%d "
"(%d seconds)\n", reset ? "reset" : "interrupt", timeout,
timeout_sec);
@@ -280,7 +287,7 @@ static struct of_platform_driver mpc8xxx_wdt_driver = {
* very early to start pinging the watchdog (misc devices are not yet
* available), and later module_init() just registers the misc device.
*/
-static int __init mpc8xxx_wdt_init_late(void)
+static int mpc8xxx_wdt_init_late(void)
{
int ret;
@@ -295,7 +302,9 @@ static int __init mpc8xxx_wdt_init_late(void)
}
return 0;
}
+#ifndef MODULE
module_init(mpc8xxx_wdt_init_late);
+#endif
static int __init mpc8xxx_wdt_init(void)
{
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index e91ada7..484c215 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/io.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
new file mode 100644
index 0000000..6756bcb
--- /dev/null
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -0,0 +1,344 @@
+/*
+ * IDT Interprise 79RC32434 watchdog driver
+ *
+ * Copyright (C) 2006, Ondrej Zajicek <santiago@crfreenet.org>
+ * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org>
+ *
+ * based on
+ * SoftDog 0.05: A Software Watchdog Device
+ *
+ * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/mach-rc32434/integ.h>
+
+#define MAX_TIMEOUT 20
+#define RC32434_WDT_INTERVAL (15 * HZ)
+
+#define VERSION "0.2"
+
+static struct {
+ struct completion stop;
+ int running;
+ struct timer_list timer;
+ int queue;
+ int default_ticks;
+ unsigned long inuse;
+} rc32434_wdt_device;
+
+static struct integ __iomem *wdt_reg;
+static int ticks = 100 * HZ;
+
+static int expect_close;
+static int timeout;
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+
+static void rc32434_wdt_start(void)
+{
+ u32 val;
+
+ if (!rc32434_wdt_device.inuse) {
+ writel(0, &wdt_reg->wtcount);
+
+ val = RC32434_ERR_WRE;
+ writel(readl(&wdt_reg->errcs) | val, &wdt_reg->errcs);
+
+ val = RC32434_WTC_EN;
+ writel(readl(&wdt_reg->wtc) | val, &wdt_reg->wtc);
+ }
+ rc32434_wdt_device.running++;
+}
+
+static void rc32434_wdt_stop(void)
+{
+ u32 val;
+
+ if (rc32434_wdt_device.running) {
+
+ val = ~RC32434_WTC_EN;
+ writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
+
+ val = ~RC32434_ERR_WRE;
+ writel(readl(&wdt_reg->errcs) & val, &wdt_reg->errcs);
+
+ rc32434_wdt_device.running = 0;
+ }
+}
+
+static void rc32434_wdt_set(int new_timeout)
+{
+ u32 cmp = new_timeout * HZ;
+ u32 state, val;
+
+ timeout = new_timeout;
+ /*
+ * store and disable WTC
+ */
+ state = (u32)(readl(&wdt_reg->wtc) & RC32434_WTC_EN);
+ val = ~RC32434_WTC_EN;
+ writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
+
+ writel(0, &wdt_reg->wtcount);
+ writel(cmp, &wdt_reg->wtcompare);
+
+ /*
+ * restore WTC
+ */
+
+ writel(readl(&wdt_reg->wtc) | state, &wdt_reg->wtc);
+}
+
+static void rc32434_wdt_reset(void)
+{
+ ticks = rc32434_wdt_device.default_ticks;
+}
+
+static void rc32434_wdt_update(unsigned long unused)
+{
+ if (rc32434_wdt_device.running)
+ ticks--;
+
+ writel(0, &wdt_reg->wtcount);
+
+ if (rc32434_wdt_device.queue && ticks)
+ mod_timer(&rc32434_wdt_device.timer,
+ jiffies + RC32434_WDT_INTERVAL);
+ else
+ complete(&rc32434_wdt_device.stop);
+}
+
+static int rc32434_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &rc32434_wdt_device.inuse))
+ return -EBUSY;
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ return nonseekable_open(inode, file);
+}
+
+static int rc32434_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_close && nowayout == 0) {
+ rc32434_wdt_stop();
+ printk(KERN_INFO KBUILD_MODNAME ": disabling watchdog timer\n");
+ module_put(THIS_MODULE);
+ } else
+ printk(KERN_CRIT KBUILD_MODNAME
+ ": device closed unexpectedly. WDT will not stop !\n");
+
+ clear_bit(0, &rc32434_wdt_device.inuse);
+ return 0;
+}
+
+static ssize_t rc32434_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 1;
+ }
+ }
+ rc32434_wdt_update(0);
+ return len;
+ }
+ return 0;
+}
+
+static int rc32434_wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int new_timeout;
+ unsigned int value;
+ static struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "RC32434_WDT Watchdog",
+ };
+ switch (cmd) {
+ case WDIOC_KEEPALIVE:
+ rc32434_wdt_reset();
+ break;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ value = readl(&wdt_reg->wtcount);
+ if (copy_to_user(argp, &value, sizeof(int)))
+ return -EFAULT;
+ break;
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user(argp, &ident, sizeof(ident)))
+ return -EFAULT;
+ break;
+ case WDIOC_SETOPTIONS:
+ if (copy_from_user(&value, argp, sizeof(int)))
+ return -EFAULT;
+ switch (value) {
+ case WDIOS_ENABLECARD:
+ rc32434_wdt_start();
+ break;
+ case WDIOS_DISABLECARD:
+ rc32434_wdt_stop();
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case WDIOC_SETTIMEOUT:
+ if (copy_from_user(&new_timeout, argp, sizeof(int)))
+ return -EFAULT;
+ if (new_timeout < 1)
+ return -EINVAL;
+ if (new_timeout > MAX_TIMEOUT)
+ return -EINVAL;
+ rc32434_wdt_set(new_timeout);
+ case WDIOC_GETTIMEOUT:
+ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct file_operations rc32434_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = rc32434_wdt_write,
+ .ioctl = rc32434_wdt_ioctl,
+ .open = rc32434_wdt_open,
+ .release = rc32434_wdt_release,
+};
+
+static struct miscdevice rc32434_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &rc32434_wdt_fops,
+};
+
+static char banner[] = KERN_INFO KBUILD_MODNAME
+ ": Watchdog Timer version " VERSION ", timer margin: %d sec\n";
+
+static int rc32434_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb500_wdt_res");
+ if (!r) {
+ printk(KERN_ERR KBUILD_MODNAME
+ "failed to retrieve resources\n");
+ return -ENODEV;
+ }
+
+ wdt_reg = ioremap_nocache(r->start, r->end - r->start + 1);
+ if (!wdt_reg) {
+ printk(KERN_ERR KBUILD_MODNAME
+ "failed to remap I/O resources\n");
+ return -ENXIO;
+ }
+
+ ret = misc_register(&rc32434_wdt_miscdev);
+
+ if (ret < 0) {
+ printk(KERN_ERR KBUILD_MODNAME
+ "failed to register watchdog device\n");
+ goto unmap;
+ }
+
+ init_completion(&rc32434_wdt_device.stop);
+ rc32434_wdt_device.queue = 0;
+
+ clear_bit(0, &rc32434_wdt_device.inuse);
+
+ setup_timer(&rc32434_wdt_device.timer, rc32434_wdt_update, 0L);
+
+ rc32434_wdt_device.default_ticks = ticks;
+
+ rc32434_wdt_start();
+
+ printk(banner, timeout);
+
+ return 0;
+
+unmap:
+ iounmap(wdt_reg);
+ return ret;
+}
+
+static int rc32434_wdt_remove(struct platform_device *pdev)
+{
+ if (rc32434_wdt_device.queue) {
+ rc32434_wdt_device.queue = 0;
+ wait_for_completion(&rc32434_wdt_device.stop);
+ }
+ misc_deregister(&rc32434_wdt_miscdev);
+
+ iounmap(wdt_reg);
+
+ return 0;
+}
+
+static struct platform_driver rc32434_wdt = {
+ .probe = rc32434_wdt_probe,
+ .remove = rc32434_wdt_remove,
+ .driver = {
+ .name = "rc32434_wdt",
+ }
+};
+
+static int __init rc32434_wdt_init(void)
+{
+ return platform_driver_register(&rc32434_wdt);
+}
+
+static void __exit rc32434_wdt_exit(void)
+{
+ platform_driver_unregister(&rc32434_wdt);
+}
+
+module_init(rc32434_wdt_init);
+module_exit(rc32434_wdt_exit);
+
+MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
+ "Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
new file mode 100644
index 0000000..9108efa
--- /dev/null
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -0,0 +1,285 @@
+/*
+ * RDC321x watchdog driver
+ *
+ * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
+ *
+ * This driver is highly inspired from the cpu5_wdt driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/mach-rdc321x/rdc321x_defs.h>
+
+#define RDC_WDT_MASK 0x80000000 /* Mask */
+#define RDC_WDT_EN 0x00800000 /* Enable bit */
+#define RDC_WDT_WTI 0x00200000 /* Generate CPU reset/NMI/WDT on timeout */
+#define RDC_WDT_RST 0x00100000 /* Reset bit */
+#define RDC_WDT_WIF 0x00040000 /* WDT IRQ Flag */
+#define RDC_WDT_IRT 0x00000100 /* IRQ Routing table */
+#define RDC_WDT_CNT 0x00000001 /* WDT count */
+
+#define RDC_CLS_TMR 0x80003844 /* Clear timer */
+
+#define RDC_WDT_INTERVAL (HZ/10+1)
+
+static int ticks = 1000;
+
+/* some device data */
+
+static struct {
+ struct completion stop;
+ int running;
+ struct timer_list timer;
+ int queue;
+ int default_ticks;
+ unsigned long inuse;
+ spinlock_t lock;
+} rdc321x_wdt_device;
+
+/* generic helper functions */
+
+static void rdc321x_wdt_trigger(unsigned long unused)
+{
+ unsigned long flags;
+
+ if (rdc321x_wdt_device.running)
+ ticks--;
+
+ /* keep watchdog alive */
+ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
+ outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA),
+ RDC3210_CFGREG_DATA);
+ spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
+
+ /* requeue?? */
+ if (rdc321x_wdt_device.queue && ticks)
+ mod_timer(&rdc321x_wdt_device.timer,
+ jiffies + RDC_WDT_INTERVAL);
+ else {
+ /* ticks doesn't matter anyway */
+ complete(&rdc321x_wdt_device.stop);
+ }
+
+}
+
+static void rdc321x_wdt_reset(void)
+{
+ ticks = rdc321x_wdt_device.default_ticks;
+}
+
+static void rdc321x_wdt_start(void)
+{
+ unsigned long flags;
+
+ if (!rdc321x_wdt_device.queue) {
+ rdc321x_wdt_device.queue = 1;
+
+ /* Clear the timer */
+ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
+ outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
+
+ /* Enable watchdog and set the timeout to 81.92 us */
+ outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA);
+ spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
+
+ mod_timer(&rdc321x_wdt_device.timer,
+ jiffies + RDC_WDT_INTERVAL);
+ }
+
+ /* if process dies, counter is not decremented */
+ rdc321x_wdt_device.running++;
+}
+
+static int rdc321x_wdt_stop(void)
+{
+ if (rdc321x_wdt_device.running)
+ rdc321x_wdt_device.running = 0;
+
+ ticks = rdc321x_wdt_device.default_ticks;
+
+ return -EIO;
+}
+
+/* filesystem operations */
+static int rdc321x_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &rdc321x_wdt_device.inuse))
+ return -EBUSY;
+
+ return nonseekable_open(inode, file);
+}
+
+static int rdc321x_wdt_release(struct inode *inode, struct file *file)
+{
+ clear_bit(0, &rdc321x_wdt_device.inuse);
+ return 0;
+}
+
+static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ unsigned int value;
+ static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET,
+ .identity = "RDC321x WDT",
+ };
+ unsigned long flags;
+
+ switch (cmd) {
+ case WDIOC_KEEPALIVE:
+ rdc321x_wdt_reset();
+ break;
+ case WDIOC_GETSTATUS:
+ /* Read the value from the DATA register */
+ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
+ value = inl(RDC3210_CFGREG_DATA);
+ spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
+ if (copy_to_user(argp, &value, sizeof(int)))
+ return -EFAULT;
+ break;
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user(argp, &ident, sizeof(ident)))
+ return -EFAULT;
+ break;
+ case WDIOC_SETOPTIONS:
+ if (copy_from_user(&value, argp, sizeof(int)))
+ return -EFAULT;
+ switch (value) {
+ case WDIOS_ENABLECARD:
+ rdc321x_wdt_start();
+ break;
+ case WDIOS_DISABLECARD:
+ return rdc321x_wdt_stop();
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!count)
+ return -EIO;
+
+ rdc321x_wdt_reset();
+
+ return count;
+}
+
+static const struct file_operations rdc321x_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .ioctl = rdc321x_wdt_ioctl,
+ .open = rdc321x_wdt_open,
+ .write = rdc321x_wdt_write,
+ .release = rdc321x_wdt_release,
+};
+
+static struct miscdevice rdc321x_wdt_misc = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &rdc321x_wdt_fops,
+};
+
+static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
+{
+ int err;
+
+ err = misc_register(&rdc321x_wdt_misc);
+ if (err < 0) {
+ printk(KERN_ERR PFX "watchdog misc_register failed\n");
+ return err;
+ }
+
+ spin_lock_init(&rdc321x_wdt_device.lock);
+
+ /* Reset the watchdog */
+ outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
+
+ init_completion(&rdc321x_wdt_device.stop);
+ rdc321x_wdt_device.queue = 0;
+
+ clear_bit(0, &rdc321x_wdt_device.inuse);
+
+ setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+
+ rdc321x_wdt_device.default_ticks = ticks;
+
+ printk(KERN_INFO PFX "watchdog init success\n");
+
+ return 0;
+}
+
+static int rdc321x_wdt_remove(struct platform_device *pdev)
+{
+ if (rdc321x_wdt_device.queue) {
+ rdc321x_wdt_device.queue = 0;
+ wait_for_completion(&rdc321x_wdt_device.stop);
+ }
+
+ misc_deregister(&rdc321x_wdt_misc);
+
+ return 0;
+}
+
+static struct platform_driver rdc321x_wdt_driver = {
+ .probe = rdc321x_wdt_probe,
+ .remove = rdc321x_wdt_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rdc321x-wdt",
+ },
+};
+
+static int __init rdc321x_wdt_init(void)
+{
+ return platform_driver_register(&rdc321x_wdt_driver);
+}
+
+static void __exit rdc321x_wdt_exit(void)
+{
+ platform_driver_unregister(&rdc321x_wdt_driver);
+}
+
+module_init(rdc321x_wdt_init);
+module_exit(rdc321x_wdt_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("RDC321x watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 3da2b90..86d4280 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -21,18 +21,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Changelog:
- * 05-Oct-2004 BJD Added semaphore init to stop crashes on open
- * Fixed tmr_count / wdt_count confusion
- * Added configurable debug
- *
- * 11-Jan-2005 BJD Fixed divide-by-2 in timeout code
- *
- * 25-Jan-2005 DA Added suspend/resume support
- * Replaced reboot notifier with .shutdown method
- *
- * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
*/
#include <linux/module.h>
@@ -157,8 +145,6 @@ static void s3c2410wdt_start(void)
writel(wdt_count, wdt_base + S3C2410_WTCNT);
writel(wtcon, wdt_base + S3C2410_WTCON);
spin_unlock(&wdt_lock);
-
- return 0;
}
static int s3c2410wdt_set_heartbeat(int timeout)
@@ -367,7 +353,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- size = (res->end-res->start)+1;
+ size = (res->end - res->start) + 1;
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
dev_err(dev, "failed to get memory region\n");
@@ -376,7 +362,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
wdt_base = ioremap(res->start, size);
- if (wdt_base == 0) {
+ if (wdt_base == NULL) {
dev_err(dev, "failed to ioremap() region\n");
ret = -EINVAL;
goto err_req;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index a5bc91a..d0e87cb 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -102,7 +102,7 @@ static void do_suspend(void)
/* XXX use normal device tree? */
xenbus_suspend();
- err = stop_machine_run(xen_suspend, &cancelled, 0);
+ err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
goto out;